ceilometer-6.0.0/0000775000567000056710000000000012701406364015011 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/devstack/0000775000567000056710000000000012701406364016615 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/devstack/upgrade/0000775000567000056710000000000012701406364020244 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/devstack/upgrade/shutdown.sh0000775000567000056710000000121112701406223022443 0ustar jenkinsjenkins00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Locate the ceilometer plugin and get its functions CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $CEILOMETER_DEVSTACK_DIR/plugin.sh set -o xtrace stop_ceilometer # ensure everything is stopped SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api" ensure_services_stopped $SERVICES_DOWN ceilometer-6.0.0/devstack/upgrade/settings0000664000567000056710000000105412701406223022021 0ustar jenkinsjenkins00000000000000register_project_for_upgrade ceilometer devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest ceilometer-6.0.0/devstack/upgrade/upgrade.sh0000775000567000056710000000573512701406224022237 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # ``upgrade-ceilometer`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Save mongodb state (replace with snapshot) # TODO(chdent): There used to be a 'register_db_to_save ceilometer' # which may wish to consider putting back in. if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$BASE_RELEASE fi # Upgrade Ceilometer # ================== # Locate ceilometer devstack plugin, the directory above the # grenade plugin. 
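# A sketch of how the nested dirname calls below resolve, assuming this
# script runs as <repo>/devstack/upgrade/upgrade.sh:
#   $(dirname $0)            -> <repo>/devstack/upgrade
#   $(dirname $(dirname $0)) -> <repo>/devstack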
CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/functions source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache # Get ceilometer functions from devstack plugin source $CEILOMETER_DEVSTACK_DIR/settings # Print the commands being run so that we can see the command that triggers # an error. set -o xtrace # Install the target ceilometer source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install # calls upgrade-ceilometer for specific release upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database # NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but # currently it is not. CEILOMETER_BIN_DIR=$(dirname $(which ceilometer-dbsync)) $CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error" # Start Ceilometer start_ceilometer # Note these are process names, not service names ensure_services_started "ceilometer-polling --polling-namespaces compute" \ "ceilometer-polling --polling-namespaces central" \ "ceilometer-polling --polling-namespaces ipmi" \ ceilometer-agent-notification \ ceilometer-api \ ceilometer-collector # Save mongodb state (replace with snapshot) if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$TARGET_RELEASE fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ceilometer-6.0.0/devstack/settings0000664000567000056710000000427412701406224020402 0ustar jenkinsjenkins00000000000000# turn on all the ceilometer services by default # Pollsters enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi # Notification Agent enable_service ceilometer-anotification # Data Collector enable_service ceilometer-collector # API service enable_service ceilometer-api # Default directories CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} # Gnocchi default archive_policy for Ceilometer GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} # Ceilometer connection info. CEILOMETER_SERVICE_PROTOCOL=http CEILOMETER_SERVICE_HOST=$SERVICE_HOST CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} # To enable OSprofiler change value of this variable to "notifications,profiler" CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True} CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379} CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} # Cache Options # NOTE(cdent): These are incomplete and specific for this testing. 
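# For example, to point the cache at a shared redis instance, these
# could be overridden in local.conf (hypothetical host shown):
#   CEILOMETER_CACHE_BACKEND=dogpile.cache.redis
#   CEILOMETER_CACHE_URL=redis://cache.example.org:6379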
CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis} CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379} CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} # Tell Tempest this project is present TEMPEST_SERVICES+=,ceilometer # Set up default directories for client and middleware GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ceilometer-6.0.0/devstack/apache-ceilometer.template0000664000567000056710000000076212701406223023720 0ustar jenkinsjenkins00000000000000Listen %PORT% <VirtualHost *:%PORT%> WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup ceilometer-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/%APACHE_NAME%/ceilometer.log CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined </VirtualHost> WSGISocketPrefix /var/run/%APACHE_NAME% ceilometer-6.0.0/devstack/plugin.sh0000664000567000056710000004676512701406224020460 0ustar jenkinsjenkins00000000000000# Install and start **Ceilometer** service in devstack # # To enable Ceilometer in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer # # By default all ceilometer services are started (see # devstack/settings). To disable a specific service use the # disable_service function. # # NOTE: Currently, there are two ways to get the IPMI based meters in # OpenStack. One way is to configure Ironic conductor to report those meters # for the nodes managed by Ironic and to have the Ceilometer notification # agent collect them. Ironic by default does NOT enable that reporting # functionality. So in order to do so, users need to set the option of # conductor.send_sensor_data to true in the ironic.conf configuration file # for the Ironic conductor service, and also enable the # ceilometer-anotification service. If you do this, disable the IPMI # polling agent: # # disable_service ceilometer-aipmi # # The other way is to use the Ceilometer ipmi agent only to get the IPMI based # meters. To avoid duplicated meters, users need to make sure to set the # option of conductor.send_sensor_data to false in the ironic.conf # configuration file if the node on which the Ceilometer ipmi agent is running # is also managed by Ironic. # # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # # CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. # CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') # CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
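#   For example (an illustrative value, matching the default in
#   devstack/settings and assuming a local redis server):
#   CEILOMETER_COORDINATION_URL=redis://localhost:6379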
# CEILOMETER_EVENTS: Set to True to enable event collection # CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # TODO(liusheng) Temporarily add this to avoid integration test failure, see bug 1548634 export SERVICE_TENANT_NAME=$SERVICE_PROJECT_NAME # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # Test if any Ceilometer services are enabled # is_ceilometer_enabled function is_ceilometer_enabled { [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 return 1 } function ceilometer_service_url { echo "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" } # _ceilometer_install_mongodb - Install mongodb and python lib. function _ceilometer_install_mongodb { # Server package is the same on all local packages=mongodb-server if is_fedora; then # mongodb client packages="${packages} mongodb" fi install_package ${packages} if is_fedora; then restart_service mongod else restart_service mongodb fi # give time for service to restart sleep 5 } # _ceilometer_install_redis() - Install the redis server and python lib. function _ceilometer_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis restart_service redis fi pip_install_gr redis } # Configure mod_wsgi function _ceilometer_config_apache_wsgi { sudo mkdir -p $CEILOMETER_WSGI_DIR local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local apache_version=$(get_apache_version) local venv_path="" # Copy proxy vhost and wsgi file sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages" fi sudo cp $CEILOMETER_DIR/devstack/apache-ceilometer.template $ceilometer_apache_conf sudo sed -e " s|%PORT%|$CEILOMETER_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $ceilometer_apache_conf } # Install required services for coordination function _ceilometer_prepare_coordination { if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif [[ "${CEILOMETER_COORDINATION_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then _ceilometer_install_redis fi } # Install required services for storage backends function _ceilometer_prepare_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then pip_install_gr pymongo _ceilometer_install_mongodb fi if [ "$CEILOMETER_BACKEND" = 'es' ] ; then ${TOP_DIR}/pkg/elasticsearch.sh download ${TOP_DIR}/pkg/elasticsearch.sh install fi } # Install the python modules for inspecting nova virt instances function _ceilometer_prepare_virt_drivers { # Only install virt drivers if we're running nova compute if is_service_enabled n-cpu ; then if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then pip_install_gr libvirt-python fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then pip_install_gr oslo.vmware fi fi } # Create ceilometer related accounts in Keystone function _ceilometer_create_accounts { if is_service_enabled ceilometer-api; then create_service_user "ceilometer" "admin" get_or_create_service
"ceilometer" "metering" "OpenStack Telemetry Service" get_or_create_endpoint "metering" \ "$REGION_NAME" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" if is_service_enabled swift; then # Ceilometer needs ResellerAdmin role to access Swift account stats. get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME fi fi } # Activities to do before ceilometer has been installed. function preinstall_ceilometer { echo_summary "Preinstall not in virtualenv context. Skipping." } # Remove WSGI files, disable and remove Apache vhost file function _ceilometer_cleanup_apache_wsgi { if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then sudo rm -f "$CEILOMETER_WSGI_DIR"/* sudo rmdir "$CEILOMETER_WSGI_DIR" sudo rm -f $(apache_site_config_for ceilometer) fi } function _drop_database { if is_service_enabled ceilometer-collector ceilometer-api ; then if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then curl -XDELETE "localhost:9200/events_*" fi fi } # cleanup_ceilometer() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_ceilometer { _ceilometer_cleanup_apache_wsgi _drop_database sudo rm -f "$CEILOMETER_CONF_DIR"/* sudo rmdir "$CEILOMETER_CONF_DIR" if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then sudo rm -f "$CEILOMETER_API_LOG_DIR"/* sudo rmdir "$CEILOMETER_API_LOG_DIR" fi } # Set configuraiton for cache backend. # NOTE(cdent): This currently only works for redis. Still working # out how to express the other backends. function _ceilometer_configure_cache_backend { iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND iniset $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL iniadd_literal $CEILOMETER_CONF cache backend_argument distributed_lock:True if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then iniadd_literal $CEILOMETER_CONF cache backend_argument db:0 iniadd_literal $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 fi } # Set configuration for storage backend. function _ceilometer_configure_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer) iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then # es is only supported for events. we will use sql for metering. 
iniset $CEILOMETER_CONF database event_connection es://localhost:9200 iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) ${TOP_DIR}/pkg/elasticsearch.sh start elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then gnocchi_url=$(gnocchi_service_url) iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi # FIXME(sileht): We shouldn't load event_dispatchers if store_event is False iniset $CEILOMETER_CONF DEFAULT event_dispatchers "" iniset $CEILOMETER_CONF notification store_events False # NOTE(gordc): set higher retry in case gnocchi is started after ceilometer on a slow machine iniset $CEILOMETER_CONF storage max_retries 20 # NOTE(gordc): set batching to better handle recording on a slow machine iniset $CEILOMETER_CONF collector batch_size 50 iniset $CEILOMETER_CONF collector batch_timeout 5 iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" else iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" fi else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi _drop_database } # Configure Ceilometer function configure_ceilometer { local conffile iniset_rpc_backend ceilometer $CEILOMETER_CONF iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT verbose True iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL iniset $CEILOMETER_CONF compute workload_partitioning True iniset $CEILOMETER_CONF notification workload_partitioning True iniset $CEILOMETER_CONF notification workers $API_WORKERS fi if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then _ceilometer_configure_cache_backend fi # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. for conffile in policy.json api_paste.ini pipeline.yaml \ event_definitions.yaml event_pipeline.yaml \ gnocchi_resources.yaml; do cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR done iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml fi if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml fi fi # The compute and central agents need these credentials in order to # call out to other services' public APIs. 
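# A sketch of the [service_credentials] section the calls below write
# (illustrative values; the real ones come from the devstack run):
#   [service_credentials]
#   auth_type = password
#   username = ceilometer
#   project_name = $SERVICE_PROJECT_NAME
#   auth_url = http://<keystone-host>/identity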
iniset $CEILOMETER_CONF service_credentials auth_type password iniset $CEILOMETER_CONF service_credentials user_domain_id default iniset $CEILOMETER_CONF service_credentials project_domain_id default iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $CEILOMETER_CONF service_credentials username ceilometer iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS # Configure storage if is_service_enabled ceilometer-collector ceilometer-api; then _ceilometer_configure_storage_backend iniset $CEILOMETER_CONF collector workers $API_WORKERS fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" fi if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then iniset $CEILOMETER_CONF api pecan_debug "False" _ceilometer_config_apache_wsgi fi if is_service_enabled ceilometer-aipmi; then # Configure rootwrap for the ipmi agent configure_rootwrap ceilometer fi } # init_ceilometer() - Initialize keystone accounts, cache dir and databases function init_ceilometer { # Get ceilometer keystone settings in place _ceilometer_create_accounts # Create cache dir sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* if is_service_enabled ceilometer-collector ceilometer-api && is_service_enabled mysql postgresql ; then if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then recreate_database ceilometer $CEILOMETER_BIN_DIR/ceilometer-dbsync fi fi } # Install Ceilometer. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. The context is not active during preinstall (when it would # otherwise make sense to install the backend services).
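# For reference, the phase dispatcher at the bottom of this file drives
# these functions in order during a stack run, roughly:
#   plugin.sh stack pre-install  -> preinstall_ceilometer
#   plugin.sh stack install      -> install_ceilometer  (virtualenv active)
#   plugin.sh stack post-config  -> configure_ceilometer
#   plugin.sh stack extra        -> init_ceilometer; start_ceilometer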
function install_ceilometer { if is_service_enabled ceilometer-acentral ceilometer-anotification ceilometer-alarm-evaluator ; then _ceilometer_prepare_coordination fi if is_service_enabled ceilometer-collector ceilometer-api; then _ceilometer_prepare_storage_backend fi if is_service_enabled ceilometer-acompute ; then _ceilometer_prepare_virt_drivers fi install_ceilometerclient setup_develop $CEILOMETER_DIR sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then sudo install -d -o $STACK_USER -m 755 $CEILOMETER_API_LOG_DIR fi } # install_ceilometerclient() - Collect source and prepare function install_ceilometerclient { if use_library_from_git "python-ceilometerclient"; then git_clone_by_name "python-ceilometerclient" setup_dev_lib "python-ceilometerclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion else pip_install_gr python-ceilometerclient fi } # start_ceilometer() - Start running processes, including screen function start_ceilometer { run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" elif is_service_enabled ceilometer-api; then enable_apache_site ceilometer restart_apache_server tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log fi # run the collector after restarting apache as it needs # operational keystone if using gnocchi run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" # Start the compute agent late to allow time for the collector to # fully wake up and connect to the message bus. See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" fi # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! 
wait_for_service $SERVICE_TIMEOUT $(ceilometer_service_url)/v2/; then die $LINENO "ceilometer-api did not start" fi fi } # stop_ceilometer() - Stop running processes function stop_ceilometer { if is_service_enabled ceilometer-api ; then if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then disable_apache_site ceilometer restart_apache_server else stop_process ceilometer-api fi fi # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector; do stop_process $serv done } # This is the main dispatcher for plugin.sh if is_service_enabled ceilometer; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Ceilometer" preinstall_ceilometer elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Ceilometer" # Use stack_install_service here to account for virtualenv stack_install_service ceilometer elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Ceilometer" configure_ceilometer elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Ceilometer" # Tidy base for ceilometer init_ceilometer # Start the services start_ceilometer fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Ceilometer" stop_ceilometer fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Ceilometer" cleanup_ceilometer fi fi # Restore xtrace $XTRACE ceilometer-6.0.0/devstack/files/0000775000567000056710000000000012701406364017717 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/devstack/files/rpms/0000775000567000056710000000000012701406364020700 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/devstack/files/rpms/ceilometer0000664000567000056710000000003012701406223022746 0ustar jenkinsjenkins00000000000000selinux-policy-targeted ceilometer-6.0.0/devstack/README.rst0000664000567000056710000000060612701406223020300 0ustar jenkinsjenkins00000000000000=============================== Enabling Ceilometer in DevStack =============================== 1. Download DevStack:: git clone https://git.openstack.org/openstack-dev/devstack cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer 3. Run ``stack.sh``.
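Optional behaviors can be tuned in the same ``local.conf`` section. For
example (a sketch; these variables and their defaults are defined in
``devstack/settings``)::

    CEILOMETER_BACKEND=mongodb
    CEILOMETER_COORDINATION_URL=redis://localhost:6379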
ceilometer-6.0.0/etc/0000775000567000056710000000000012701406364015564 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/etc/ceilometer/0000775000567000056710000000000012701406364017714 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/etc/ceilometer/event_definitions.yaml0000664000567000056710000003550512701406224024317 0ustar jenkinsjenkins00000000000000--- - event_type: compute.instance.* traits: &instance_traits tenant_id: fields: payload.tenant_id user_id: fields: payload.user_id instance_id: fields: payload.instance_id host: fields: publisher_id.`split(., 1, 1)` service: fields: publisher_id.`split(., 0, -1)` memory_mb: type: int fields: payload.memory_mb disk_gb: type: int fields: payload.disk_gb root_gb: type: int fields: payload.root_gb ephemeral_gb: type: int fields: payload.ephemeral_gb vcpus: type: int fields: payload.vcpus instance_type_id: type: int fields: payload.instance_type_id instance_type: fields: payload.instance_type state: fields: payload.state os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' os_version: fields: payload.image_meta.'org.openstack__1__os_version' os_distro: fields: payload.image_meta.'org.openstack__1__os_distro' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: compute.instance.exists traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: &cinder_traits user_id: fields: payload.user_id project_id: fields: payload.tenant_id availability_zone: fields: payload.availability_zone display_name: fields: payload.display_name replication_status: fields: payload.replication_status status: fields: payload.status created_at: fields: payload.created_at - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.volume_id host: fields: payload.host size: fields: payload.size type: fields: payload.volume_type replication_status: fields: payload.replication_status - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id volume_id: fields: payload.volume_id - event_type: ['image_volume_cache.*'] traits: image_id: fields: payload.image_id host: fields: payload.host - event_type: ['image.update', 'image.upload', 'image.delete'] traits: &glance_crud project_id: fields: payload.owner resource_id: fields: payload.id name: fields: payload.name status: fields: payload.status created_at: fields: payload.created_at user_id: fields: payload.owner deleted_at: fields: payload.deleted_at size: fields: payload.size - event_type: image.send traits: &glance_send receiver_project: fields: payload.receiver_tenant_id receiver_user: fields: payload.receiver_user_id user_id: fields: payload.owner_id image_id: fields: payload.image_id destination_ip: fields: payload.destination_ip bytes_sent: fields: payload.bytes_sent - event_type: orchestration.stack.* traits: &orchestration_crud project_id: fields: payload.tenant_id user_id: fields: ['_context_trustor_user_id', '_context_user_id'] 
resource_id: fields: payload.stack_identity - event_type: sahara.cluster.* traits: &sahara_crud project_id: fields: payload.project_id user_id: fields: _context_user_id resource_id: fields: payload.cluster_id - event_type: sahara.cluster.health traits: &sahara_health <<: *sahara_crud verification_id: fields: payload.verification_id health_check_status: fields: payload.health_check_status health_check_name: fields: payload.health_check_name health_check_description: fields: payload.health_check_description created_at: type: datetime fields: payload.created_at updated_at: type: datetime fields: payload.updated_at - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] traits: &identity_crud resource_id: fields: payload.resource_info initiator_id: fields: payload.initiator.id project_id: fields: payload.initiator.project_id domain_id: fields: payload.initiator.domain_id - event_type: identity.role_assignment.* traits: &identity_role_assignment role: fields: payload.role group: fields: payload.group domain: fields: payload.domain user: fields: payload.user project: fields: payload.project - event_type: identity.authenticate traits: &identity_authenticate typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_addr: fields: payload.initiator.host.address target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id observer_typeURI: fields: payload.observer.typeURI observer_id: fields: payload.observer.id - event_type: objectstore.http.request traits: &objectstore_request typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_project_id: fields: payload.initiator.project_id target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id target_action: fields: payload.target.action target_metadata_path: fields: payload.target.metadata.path target_metadata_version: fields: payload.target.metadata.version target_metadata_container: fields: payload.target.metadata.container target_metadata_object: fields: payload.target.metadata.object observer_id: fields: payload.observer.id - event_type: magnetodb.table.* traits: &kv_store resource_id: fields: payload.table_uuid user_id: fields: _context_user_id project_id: fields: _context_tenant - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] traits: &network_traits user_id: fields: _context_user_id project_id: fields: _context_tenant_id - event_type: network.* traits: <<: *network_traits resource_id: fields: ['payload.network.id', 'payload.id'] - event_type: subnet.* traits: <<: *network_traits resource_id: fields: 
['payload.subnet.id', 'payload.id'] - event_type: port.* traits: <<: *network_traits resource_id: fields: ['payload.port.id', 'payload.id'] - event_type: router.* traits: <<: *network_traits resource_id: fields: ['payload.router.id', 'payload.id'] - event_type: floatingip.* traits: <<: *network_traits resource_id: fields: ['payload.floatingip.id', 'payload.id'] - event_type: pool.* traits: <<: *network_traits resource_id: fields: ['payload.pool.id', 'payload.id'] - event_type: vip.* traits: <<: *network_traits resource_id: fields: ['payload.vip.id', 'payload.id'] - event_type: member.* traits: <<: *network_traits resource_id: fields: ['payload.member.id', 'payload.id'] - event_type: health_monitor.* traits: <<: *network_traits resource_id: fields: ['payload.health_monitor.id', 'payload.id'] - event_type: healthmonitor.* traits: <<: *network_traits resource_id: fields: ['payload.healthmonitor.id', 'payload.id'] - event_type: listener.* traits: <<: *network_traits resource_id: fields: ['payload.listener.id', 'payload.id'] - event_type: loadbalancer.* traits: <<: *network_traits resource_id: fields: ['payload.loadbalancer.id', 'payload.id'] - event_type: firewall.* traits: <<: *network_traits resource_id: fields: ['payload.firewall.id', 'payload.id'] - event_type: firewall_policy.* traits: <<: *network_traits resource_id: fields: ['payload.firewall_policy.id', 'payload.id'] - event_type: firewall_rule.* traits: <<: *network_traits resource_id: fields: ['payload.firewall_rule.id', 'payload.id'] - event_type: vpnservice.* traits: <<: *network_traits resource_id: fields: ['payload.vpnservice.id', 'payload.id'] - event_type: ipsecpolicy.* traits: <<: *network_traits resource_id: fields: ['payload.ipsecpolicy.id', 'payload.id'] - event_type: ikepolicy.* traits: <<: *network_traits resource_id: fields: ['payload.ikepolicy.id', 'payload.id'] - event_type: ipsec_site_connection.* traits: <<: *network_traits resource_id: fields: ['payload.ipsec_site_connection.id', 'payload.id'] - event_type: '*http.*' traits: &http_audit project_id: fields: payload.initiator.project_id user_id: fields: payload.initiator.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType action: fields: payload.action outcome: fields: payload.outcome id: fields: payload.id eventTime: fields: payload.eventTime requestPath: fields: payload.requestPath observer_id: fields: payload.observer.id target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI target_name: fields: payload.target.name initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_address: fields: payload.initiator.host.address - event_type: '*http.response' traits: <<: *http_audit reason_code: fields: payload.reason.reasonCode - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] traits: &dns_domain_traits status: fields: payload.status retry: fields: payload.retry description: fields: payload.description expire: fields: payload.expire email: fields: payload.email ttl: fields: payload.ttl action: fields: payload.action name: fields: payload.name resource_id: fields: payload.id created_at: fields: payload.created_at updated_at: fields: payload.updated_at version: fields: payload.version parent_domain_id: fields: parent_domain_id serial: fields: payload.serial - event_type: dns.domain.exists traits: <<: *dns_domain_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning 
audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: trove.* traits: &trove_base_traits state: fields: payload.state_description instance_type: fields: payload.instance_type user_id: fields: payload.user_id resource_id: fields: payload.instance_id instance_type_id: fields: payload.instance_type_id launched_at: type: datetime fields: payload.launched_at instance_name: fields: payload.instance_name state: fields: payload.state nova_instance_id: fields: payload.nova_instance_id service_id: fields: payload.service_id created_at: type: datetime fields: payload.created_at region: fields: payload.region - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] traits: &trove_common_traits name: fields: payload.name availability_zone: fields: payload.availability_zone instance_size: type: int fields: payload.instance_size volume_size: type: int fields: payload.volume_size nova_volume_id: fields: payload.nova_volume_id - event_type: trove.instance.create traits: <<: [*trove_base_traits, *trove_common_traits] - event_type: trove.instance.modify_volume traits: <<: [*trove_base_traits, *trove_common_traits] old_volume_size: type: int fields: payload.old_volume_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.modify_flavor traits: <<: [*trove_base_traits, *trove_common_traits] old_instance_size: type: int fields: payload.old_instance_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.delete traits: <<: [*trove_base_traits, *trove_common_traits] deleted_at: type: datetime fields: payload.deleted_at - event_type: trove.instance.exists traits: <<: *trove_base_traits display_name: fields: payload.display_name audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params ceilometer-6.0.0/etc/ceilometer/gnocchi_resources.yaml0000664000567000056710000001370512701406223024304 0ustar jenkinsjenkins00000000000000--- resources: - resource_type: identity archive_policy: low metrics: - 'identity.authenticate.success' - 'identity.authenticate.pending' - 'identity.authenticate.failure' - 'identity.user.created' - 'identity.user.deleted' - 'identity.user.updated' - 'identity.group.created' - 'identity.group.deleted' - 'identity.group.updated' - 'identity.role.created' - 'identity.role.deleted' - 'identity.role.updated' - 'identity.project.created' - 'identity.project.deleted' - 'identity.project.updated' - 'identity.trust.created' - 'identity.trust.deleted' - 'identity.role_assignment.created' - 'identity.role_assignment.deleted' - resource_type: ceph_account metrics: - 'radosgw.objects' - 'radosgw.objects.size' - 'radosgw.objects.containers' - 'radosgw.api.request' - 'radosgw.containers.objects' - 'radosgw.containers.objects.size' - resource_type: instance metrics: - 'instance' - 'memory' - 'memory.usage' - 
'memory.resident' - 'vcpus' - 'cpu' - 'cpu.delta' - 'cpu_util' - 'disk.root.size' - 'disk.ephemeral.size' - 'disk.read.requests' - 'disk.read.requests.rate' - 'disk.write.requests' - 'disk.write.requests.rate' - 'disk.read.bytes' - 'disk.read.bytes.rate' - 'disk.write.bytes' - 'disk.write.bytes.rate' - 'disk.latency' - 'disk.iops' - 'disk.capacity' - 'disk.allocation' - 'disk.usage' attributes: host: resource_metadata.host image_ref: resource_metadata.image_ref display_name: resource_metadata.display_name flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)) server_group: resource_metadata.user_metadata.server_group - resource_type: instance_network_interface metrics: - 'network.outgoing.packets.rate' - 'network.incoming.packets.rate' - 'network.outgoing.packets' - 'network.incoming.packets' - 'network.outgoing.bytes.rate' - 'network.incoming.bytes.rate' - 'network.outgoing.bytes' - 'network.incoming.bytes' attributes: name: resource_metadata.vnic_name instance_id: resource_metadata.instance_id - resource_type: instance_disk metrics: - 'disk.device.read.requests' - 'disk.device.read.requests.rate' - 'disk.device.write.requests' - 'disk.device.write.requests.rate' - 'disk.device.read.bytes' - 'disk.device.read.bytes.rate' - 'disk.device.write.bytes' - 'disk.device.write.bytes.rate' - 'disk.device.latency' - 'disk.device.iops' - 'disk.device.capacity' - 'disk.device.allocation' - 'disk.device.usage' attributes: name: resource_metadata.disk_name instance_id: resource_metadata.instance_id - resource_type: image metrics: - 'image' - 'image.size' - 'image.download' - 'image.serve' attributes: name: resource_metadata.name container_format: resource_metadata.container_format disk_format: resource_metadata.disk_format - resource_type: ipmi metrics: - 'hardware.ipmi.node.power' - 'hardware.ipmi.node.temperature' - 'hardware.ipmi.node.inlet_temperature' - 'hardware.ipmi.node.outlet_temperature' - 'hardware.ipmi.node.fan' - 'hardware.ipmi.node.current' - 'hardware.ipmi.node.voltage' - 'hardware.ipmi.node.airflow' - 'hardware.ipmi.node.cups' - 'hardware.ipmi.node.cpu_util' - 'hardware.ipmi.node.mem_util' - 'hardware.ipmi.node.io_util' - resource_type: network metrics: - 'bandwidth' - 'network' - 'network.create' - 'network.update' - 'subnet' - 'subnet.create' - 'subnet.update' - 'port' - 'port.create' - 'port.update' - 'router' - 'router.create' - 'router.update' - 'ip.floating' - 'ip.floating.create' - 'ip.floating.update' - resource_type: stack metrics: - 'stack.create' - 'stack.update' - 'stack.delete' - 'stack.resume' - 'stack.suspend' - resource_type: swift_account metrics: - 'storage.objects.incoming.bytes' - 'storage.objects.outgoing.bytes' - 'storage.api.request' - 'storage.objects.size' - 'storage.objects' - 'storage.objects.containers' - 'storage.containers.objects' - 'storage.containers.objects.size' - resource_type: volume metrics: - 'volume' - 'volume.size' - 'volume.create' - 'volume.delete' - 'volume.update' - 'volume.resize' - 'volume.attach' - 'volume.detach' attributes: display_name: resource_metadata.display_name - resource_type: host metrics: - 'hardware.cpu.load.1min' - 'hardware.cpu.load.5min' - 'hardware.cpu.load.15min' - 'hardware.cpu.util' - 'hardware.memory.total' - 'hardware.memory.used' - 'hardware.memory.swap.total' - 'hardware.memory.swap.avail' - 'hardware.memory.buffer' - 'hardware.memory.cached' - 'hardware.network.ip.outgoing.datagrams' - 'hardware.network.ip.incoming.datagrams' - 'hardware.system_stats.cpu.idle' - 'hardware.system_stats.io.outgoing.blocks' - 
'hardware.system_stats.io.incoming.blocks' attributes: host_name: resource_metadata.resource_url - resource_type: host_disk metrics: - 'hardware.disk.size.total' - 'hardware.disk.size.used' attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.device - resource_type: host_network_interface metrics: - 'hardware.network.incoming.bytes' - 'hardware.network.outgoing.bytes' - 'hardware.network.outgoing.errors' attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.name ceilometer-6.0.0/etc/ceilometer/rootwrap.d/0000775000567000056710000000000012701406364022013 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/etc/ceilometer/rootwrap.d/ipmi.filters0000664000567000056710000000036012701406223024334 0ustar jenkinsjenkins00000000000000# ceilometer-rootwrap command filters for IPMI capable nodes # This file should be owned by (and only-writeable by) the root user [Filters] # ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool' ipmitool: CommandFilter, ipmitool, root ceilometer-6.0.0/etc/ceilometer/rootwrap.conf0000664000567000056710000000172712701406223022441 0ustar jenkinsjenkins00000000000000# Configuration for ceilometer-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ceilometer-6.0.0/etc/ceilometer/policy.json0000664000567000056710000000066012701406223022102 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "telemetry:get_samples": "", "telemetry:get_sample": "", "telemetry:query_sample": "", "telemetry:create_samples": "", "telemetry:compute_statistics": "", "telemetry:get_meters": "", "telemetry:get_resource": "", "telemetry:get_resources": "", "telemetry:events:index": "", "telemetry:events:show": "" } ceilometer-6.0.0/etc/ceilometer/examples/0000775000567000056710000000000012701406364021532 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml0000664000567000056710000002137512701406223031225 0ustar jenkinsjenkins00000000000000metric: # LBaaS V2 - name: "loadbalancer.create" event_type: - "loadbalancer.create.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.update" event_type: - "loadbalancer.update.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.delete" event_type: - "loadbalancer.delete.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "listener.create" event_type: - "listener.create.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "listener.update" event_type: - "listener.update.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: 
$.payload.listener.connection_limit - name: "listener.delete" event_type: - "listener.delete.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "healthmonitor.create" event_type: - "healthmonitor.create.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.update" event_type: - "healthmonitor.update.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.delete" event_type: - "healthmonitor.delete.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "pool.create" event_type: - "pool.create.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.update" event_type: - "pool.update.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.delete" event_type: - "pool.delete.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol 
subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "member.create" event_type: - "member.create.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.update" event_type: - "member.update.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.delete" event_type: - "member.delete.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id ceilometer-6.0.0/etc/ceilometer/examples/osprofiler_event_definitions.yaml0000664000567000056710000000130212701406223030364 0ustar jenkinsjenkins00000000000000--- - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params ceilometer-6.0.0/etc/ceilometer/ceilometer-config-generator.conf0000664000567000056710000000050312701406223026132 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/ceilometer/ceilometer.conf wrap_width = 79 namespace = ceilometer namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy namespace = oslo.service.service namespace = keystonemiddleware.auth_token ceilometer-6.0.0/etc/ceilometer/README-ceilometer.conf.txt0000664000567000056710000000020012701406223024446 0ustar jenkinsjenkins00000000000000To generate the sample ceilometer.conf file, run the following command from the top-level ceilometer directory: tox -egenconfigceilometer-6.0.0/etc/ceilometer/event_pipeline.yaml0000664000567000056710000000032412701406224023600 0ustar jenkinsjenkins00000000000000--- sources: - name: event_source events: - "*" sinks: - event_sink sinks: - name: event_sink transformers: triggers: publishers: - notifier:// ceilometer-6.0.0/etc/ceilometer/pipeline.yaml0000664000567000056710000000500312701406223022375 0ustar jenkinsjenkins00000000000000--- sources: - name: meter_source interval: 600 meters: - "*" sinks: - meter_sink - name: cpu_source interval: 600 meters: - "cpu" sinks: - 
ceilometer-6.0.0/etc/ceilometer/pipeline.yaml0000664000567000056710000000500312701406223022375 0ustar jenkinsjenkins00000000000000
---
sources:
    - name: meter_source
      interval: 600
      meters:
          - "*"
      sinks:
          - meter_sink
    - name: cpu_source
      interval: 600
      meters:
          - "cpu"
      sinks:
          - cpu_sink
          - cpu_delta_sink
    - name: disk_source
      interval: 600
      meters:
          - "disk.read.bytes"
          - "disk.read.requests"
          - "disk.write.bytes"
          - "disk.write.requests"
          - "disk.device.read.bytes"
          - "disk.device.read.requests"
          - "disk.device.write.bytes"
          - "disk.device.write.requests"
      sinks:
          - disk_sink
    - name: network_source
      interval: 600
      meters:
          - "network.incoming.bytes"
          - "network.incoming.packets"
          - "network.outgoing.bytes"
          - "network.outgoing.packets"
      sinks:
          - network_sink
sinks:
    - name: meter_sink
      transformers:
      publishers:
          - notifier://
    - name: cpu_sink
      transformers:
          - name: "rate_of_change"
            parameters:
                target:
                    name: "cpu_util"
                    unit: "%"
                    type: "gauge"
                    scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
      publishers:
          - notifier://
    - name: cpu_delta_sink
      transformers:
          - name: "delta"
            parameters:
                target:
                    name: "cpu.delta"
                growth_only: True
      publishers:
          - notifier://
    - name: disk_sink
      transformers:
          - name: "rate_of_change"
            parameters:
                source:
                    map_from:
                        name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
                        unit: "(B|request)"
                target:
                    map_to:
                        name: "\\1.\\2.\\3.rate"
                        unit: "\\1/s"
                    type: "gauge"
      publishers:
          - notifier://
    - name: network_sink
      transformers:
          - name: "rate_of_change"
            parameters:
                source:
                    map_from:
                        name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
                        unit: "(B|packet)"
                target:
                    map_to:
                        name: "network.\\1.\\2.rate"
                        unit: "\\1/s"
                    type: "gauge"
      publishers:
          - notifier://
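The cpu_sink scale expression above is what turns the cumulative "cpu" meter (CPU time in nanoseconds) into the "cpu_util" gauge percentage. A self-contained sketch of the arithmetic follows; the sample volumes and cpu_number are invented, and only the formula itself comes from pipeline.yaml:

# Two hypothetical cumulative "cpu" samples taken 600 s apart (the default
# source interval above), in nanoseconds of CPU time:
prev_volume, prev_ts = 4.2e11, 0.0
curr_volume, curr_ts = 5.4e11, 600.0
cpu_number = 2  # resource_metadata.cpu_number

rate = (curr_volume - prev_volume) / (curr_ts - prev_ts)  # ns of CPU per s
cpu_util = rate * 100.0 / (10**9 * (cpu_number or 1))     # percent
print(cpu_util)  # 10.0, i.e. ~10% average utilisation across both vCPUs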
ceilometer-6.0.0/etc/ceilometer/api_paste.ini0000664000567000056710000000130412701406223022352 0ustar jenkinsjenkins00000000000000
# Ceilometer API WSGI Pipeline
# Define the filters that make up the pipeline for processing WSGI requests
# Note: This pipeline is PasteDeploy's term rather than Ceilometer's pipeline
# used for processing samples
# Remove authtoken from the pipeline if you don't want to use keystone authentication
[pipeline:main]
pipeline = cors request_id authtoken api-server

[app:api-server]
paste.app_factory = ceilometer.api.app:app_factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory

[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = ceilometer
ceilometer-6.0.0/etc/apache2/0000775000567000056710000000000012701406364017067 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/etc/apache2/ceilometer0000664000567000056710000000264212701406223021140 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is an example Apache2 configuration file for using the
# ceilometer API through mod_wsgi.

# Note: If you are using a Debian-based system then the paths
# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead
# of "httpd".
#
# The number of processes and threads is an example only and should
# be adjusted according to local requirements.

Listen 8777

<VirtualHost *:8777>
    WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
    WSGIProcessGroup ceilometer-api

    WSGIScriptAlias / /var/www/ceilometer/app
    WSGIApplicationGroup %{GLOBAL}

    <IfVersion >= 2.4>
        ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/ceilometer_error.log
    CustomLog /var/log/httpd/ceilometer_access.log combined
</VirtualHost>

WSGISocketPrefix /var/run/httpd
ceilometer-6.0.0/babel.cfg0000664000567000056710000000002112701406223016522 0ustar jenkinsjenkins00000000000000
[python: **.py]
ceilometer-6.0.0/.coveragerc0000664000567000056710000000014212701406223017121 0ustar jenkinsjenkins00000000000000
[run]
branch = True
source = ceilometer
omit = ceilometer/tests/*

[report]
ignore_errors = True
ceilometer-6.0.0/MAINTAINERS0000664000567000056710000000071412701406223016502 0ustar jenkinsjenkins00000000000000
= Generalist Code Reviewers =

The current members of ceilometer-core are listed here:
https://launchpad.net/~ceilometer-drivers/+members#active

This group can +2 and approve patches in Ceilometer. However, they may
choose to seek feedback from the appropriate specialist maintainer before
approving a patch if it is in any way controversial or risky.

= IRC handles of maintainers =
cdent
gordc
ildikov
jd__
liusheng
llu
_nadya_
pradk
rohit_
sileht
zqfan
ceilometer-6.0.0/setup.cfg0000664000567000056710000004043512701406364016640 0ustar jenkinsjenkins00000000000000
[metadata]
name = ceilometer
summary = OpenStack Telemetry
description-file =
    README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://docs.openstack.org/developer/ceilometer/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Topic :: System :: Monitoring

[global]
setup-hooks =
    pbr.hooks.setup_hook

[files]
packages =
    ceilometer

[entry_points]
ceilometer.notification =
    instance = ceilometer.compute.notifications.instance:Instance
    instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
    network = ceilometer.network.notifications:Network
    subnet = ceilometer.network.notifications:Subnet
    port = ceilometer.network.notifications:Port
    router = ceilometer.network.notifications:Router
    floatingip = ceilometer.network.notifications:FloatingIP
    http.request = ceilometer.middleware:HTTPRequest
    http.response = ceilometer.middleware:HTTPResponse
    hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
    hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
    hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
    hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification
    network.services.lb.pool = ceilometer.network.notifications:Pool
    network.services.lb.vip = ceilometer.network.notifications:Vip
    network.services.lb.member = ceilometer.network.notifications:Member
    network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor
    network.services.firewall = ceilometer.network.notifications:Firewall
    network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy
    network.services.firewall.rule = ceilometer.network.notifications:FirewallRule
    network.services.vpn = ceilometer.network.notifications:VPNService
    network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
    network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
    network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
    _sample = ceilometer.telemetry.notifications:TelemetryIpc
    meter = ceilometer.meter.notifications:ProcessMeterNotifications
ceilometer.discover =
    local_instances = ceilometer.compute.discovery:InstanceDiscovery
    endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
    tenant = ceilometer.agent.discovery.tenant:TenantDiscovery
    local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery
    lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery
    lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery
    lb_members = ceilometer.network.services.discovery:LBMembersDiscovery
    lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery
    lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery
    lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery
    vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery
    ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
    fw_services = ceilometer.network.services.discovery:FirewallDiscovery
    fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
    tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
ceilometer.poll.compute =
    disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster
    disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
    disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster
    disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster
    disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster
    disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
    disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster
    disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster
    disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster
    disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster
    disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster
    disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
    disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster
    disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster
    disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster
    disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster
    disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster
    disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
    disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster
    disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
    cpu = ceilometer.compute.pollsters.cpu:CPUPollster
    cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster
    network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
    network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster
    network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster
    network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster
    network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
    network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
    instance = ceilometer.compute.pollsters.instance:InstancePollster
    memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
    memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
    disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
    disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster
    disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster
    disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster
    disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster
    disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster
ceilometer.poll.ipmi =
    hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster
    hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster
    hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster
    hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster
    hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster
    hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster
    hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster
    hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster
    hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster
    hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster
    hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster
    hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster
ceilometer.poll.central =
    ip.floating = ceilometer.network.floatingip:FloatingIPPollster
    image = ceilometer.image.glance:ImagePollster
    image.size = ceilometer.image.glance:ImageSizePollster
    rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster
    rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster
    rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster
    rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster
    rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster
    rgw.usage = ceilometer.objectstore.rgw:UsagePollster
    storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster
    storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster
    storage.objects = ceilometer.objectstore.swift:ObjectsPollster
    storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster
    storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster
    energy = ceilometer.energy.kwapi:EnergyPollster
    power = ceilometer.energy.kwapi:PowerPollster
    switch.port = ceilometer.network.statistics.port:PortPollster
    switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets
    switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets
    switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes
    switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes
    switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops
    switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops
    switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors
    switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors
    switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors
    switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors
    switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors
    switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount
    switch.table = ceilometer.network.statistics.table:TablePollster
    switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries
    switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets
    switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets
    switch = ceilometer.network.statistics.switch:SWPollster
    switch.flow = ceilometer.network.statistics.flow:FlowPollster
    switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes
    switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds
    switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds
    switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets
    network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster
    network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster
    network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster
    network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster
    network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster
    network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster
    network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster
    network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster
    network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster
    network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster
    network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster
    network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster
    network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster
    network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster
ceilometer.builder.poll.central =
    hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
ceilometer.event.storage =
    es = ceilometer.event.storage.impl_elasticsearch:Connection
    log = ceilometer.event.storage.impl_log:Connection
    mongodb = ceilometer.event.storage.impl_mongodb:Connection
    mysql = ceilometer.event.storage.impl_sqlalchemy:Connection
    postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
    sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
    hbase = ceilometer.event.storage.impl_hbase:Connection
    db2 = ceilometer.event.storage.impl_db2:Connection
ceilometer.metering.storage =
    log = ceilometer.storage.impl_log:Connection
    mongodb = ceilometer.storage.impl_mongodb:Connection
    mysql = ceilometer.storage.impl_sqlalchemy:Connection
    postgresql = ceilometer.storage.impl_sqlalchemy:Connection
    sqlite = ceilometer.storage.impl_sqlalchemy:Connection
    hbase = ceilometer.storage.impl_hbase:Connection
    db2 = ceilometer.storage.impl_db2:Connection
ceilometer.compute.virt =
    libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
    hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
    vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
    xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector
ceilometer.hardware.inspectors =
    snmp = ceilometer.hardware.inspector.snmp:SNMPInspector
ceilometer.transformer =
    accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
    delta = ceilometer.transformer.conversions:DeltaTransformer
    unit_conversion = ceilometer.transformer.conversions:ScalingTransformer
    rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer
    aggregator = ceilometer.transformer.conversions:AggregatorTransformer
    arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer
ceilometer.publisher =
    test = ceilometer.publisher.test:TestPublisher
    notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
    udp = ceilometer.publisher.udp:UDPPublisher
    file = ceilometer.publisher.file:FilePublisher
    direct = ceilometer.publisher.direct:DirectPublisher
    kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
ceilometer.event.publisher =
    test = ceilometer.publisher.test:TestPublisher
    direct = ceilometer.publisher.direct:DirectPublisher
    notifier = ceilometer.publisher.messaging:EventNotifierPublisher
    kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
ceilometer.event.trait_plugin =
    split = ceilometer.event.trait_plugins:SplitterTraitPlugin
    bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
    timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin
console_scripts =
    ceilometer-api = ceilometer.cmd.api:main
    ceilometer-polling = ceilometer.cmd.polling:main
    ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
    ceilometer-send-sample = ceilometer.cmd.sample:send_sample
    ceilometer-dbsync = ceilometer.cmd.storage:dbsync
    ceilometer-expirer = ceilometer.cmd.storage:expirer
    ceilometer-rootwrap = oslo_rootwrap.cmd:main
    ceilometer-collector = ceilometer.cmd.collector:main
ceilometer.dispatcher.meter =
    database = ceilometer.dispatcher.database:DatabaseDispatcher
    file = ceilometer.dispatcher.file:FileDispatcher
    http = ceilometer.dispatcher.http:HttpDispatcher
    gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
ceilometer.dispatcher.event =
    database = ceilometer.dispatcher.database:DatabaseDispatcher
    file = ceilometer.dispatcher.file:FileDispatcher
    http = ceilometer.dispatcher.http:HttpDispatcher
network.statistics.drivers =
    opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver
    opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver
oslo.config.opts =
    ceilometer = ceilometer.opts:list_opts
oslo.config.opts.defaults =
    ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults
keystoneauth1.plugin =
    password-ceilometer-legacy = ceilometer.keystone_client:LegacyCeilometerKeystoneLoader
tempest.test_plugins =
    ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin

[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[pbr]
warnerrors = true
autodoc_index_modules = true

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = ceilometer/locale/ceilometer.pot

[compile_catalog]
directory = ceilometer/locale
domain = ceilometer
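# NOTE: the [entry_points] groups above (ceilometer.notification,
# ceilometer.poll.*, ceilometer.publisher and friends) are stevedore plugin
# namespaces. A minimal sketch of how such a group is looked up at runtime,
# mirroring the ExtensionManager call in tools/ceilometer-test-event.py
# further down this tree (kept as comments so this file stays valid ini):
#
#     from stevedore import extension
#
#     mgr = extension.ExtensionManager(
#         namespace='ceilometer.event.trait_plugin',
#         invoke_on_load=False)
#     print(sorted(mgr.names()))  # ['bitfield', 'split', 'timedelta']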
[update_catalog]
domain = ceilometer
output_dir = ceilometer/locale
input_file = ceilometer/locale/ceilometer.pot

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
ceilometer-6.0.0/tools/0000775000567000056710000000000012701406364016151 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/tools/pretty_tox.sh0000775000567000056710000000065212701406223020726 0ustar jenkinsjenkins00000000000000
#!/usr/bin/env bash

set -o pipefail

TESTRARGS=$1

# --until-failure is not compatible with --subunit see:
#
# https://bugs.launchpad.net/testrepository/+bug/1411804
#
# this work around exists until that is addressed
if [[ "$TESTRARGS" =~ "until-failure" ]]; then
    python setup.py testr --slowest --testr-args="$TESTRARGS"
else
    python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
fi
ceilometer-6.0.0/tools/lintstack.py0000775000567000056710000001433412701406223020521 0ustar jenkinsjenkins00000000000000
#!/usr/bin/env python

# Copyright (c) 2012, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""pylint error checking."""

from __future__ import print_function

import json
import re
import sys

from pylint import lint
from six.moves import cStringIO as StringIO  # noqa

# These variables will be useful if we will need to skip some pylint checks
ignore_codes = []
ignore_messages = []
ignore_modules = []

KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"


class LintOutput(object):

    _cached_filename = None
    _cached_content = None

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
            cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())

    @classmethod
    def from_msg_to_dict(cls, msg):
        """From the output of pylint msg, to a dict.

        Each key is a unique error identifier, value is a list of LintOutput
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            if obj.is_ignored():
                continue
            key = obj.key()
            if key not in result:
                result[key] = []
            result[key].append(obj)
        return result

    def is_ignored(self):
        if self.code in ignore_codes:
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        if any(msg in self.message for msg in ignore_messages):
            return True
        return False

    def key(self):
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" "%(code)s: %(message)s" % { "filename": self.filename, "lineno": self.lineno, "line_content": self.line_content, "code": self.code, "message": self.message, }) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print("# automatically generated by tools/lintstack.py", file=output) for i in sorted(errors.keys()): print(json.dumps(i), file=output) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO() args = ["--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}", "-E", "ceilometer"] lint.Run(args, exit=False) val = buff.getvalue() buff.close() return val def generate_error_keys(msg=None): print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def validate(newmsg=None): print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print("Unique errors reported by pylint: was %d, now %d." % (len(known), len(errors))) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print(err.lintoutput) print() passed = False if passed: print("Congrats! pylint check passed.") redundant = known - set(errors.keys()) if redundant: print("Extra credit: some known pylint exceptions disappeared.") for i in sorted(redundant): print(json.dumps(i)) print("Consider regenerating the exception file if you will.") else: print("Please fix the errors above. If you believe they are false" " positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def usage(): print("""Usage: tools/lintstack.py [generate|validate] To generate pylint_exceptions file: tools/lintstack.py generate To validate the current commit: tools/lintstack.py """) def main(): option = "validate" if len(sys.argv) > 1: option = sys.argv[1] if option == "generate": generate_error_keys() elif option == "validate": validate() else: usage() if __name__ == "__main__": main() ceilometer-6.0.0/tools/__init__.py0000664000567000056710000000000012701406223020242 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/tools/make_test_data.py0000775000567000056710000001647112701406223021476 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating test data for Ceilometer. Usage: Generate testing data for e.g. 
for default time span source .tox/py27/bin/activate ./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util --volume 20 """ import argparse import datetime import logging import random import sys import uuid from oslo_config import cfg from oslo_utils import timeutils from ceilometer.publisher import utils from ceilometer import sample from ceilometer import storage def make_test_data(name, meter_type, unit, volume, random_min, random_max, user_id, project_id, resource_id, start, end, interval, resource_metadata=None, source='artificial'): resource_metadata = resource_metadata or {'display_name': 'toto', 'host': 'tata', 'image_ref': 'test', 'instance_flavor_id': 'toto', 'server_group': 'toto', } # Compute start and end timestamps for the new data. if isinstance(start, datetime.datetime): timestamp = start else: timestamp = timeutils.parse_strtime(start) if not isinstance(end, datetime.datetime): end = timeutils.parse_strtime(end) increment = datetime.timedelta(minutes=interval) print('Adding new samples for meter %s.' % (name)) # Generate samples n = 0 total_volume = volume while timestamp <= end: if (random_min >= 0 and random_max >= 0): # If there is a random element defined, we will add it to # user given volume. if isinstance(random_min, int) and isinstance(random_max, int): total_volume += random.randint(random_min, random_max) else: total_volume += random.uniform(random_min, random_max) c = sample.Sample(name=name, type=meter_type, unit=unit, volume=total_volume, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=timestamp.isoformat(), resource_metadata=resource_metadata, source=source, ) data = utils.meter_message_from_counter( c, cfg.CONF.publisher.telemetry_secret) # timestamp should be string when calculating signature, but should be # datetime object when calling record_metering_data. data['timestamp'] = timestamp yield data n += 1 timestamp = timestamp + increment if (meter_type == 'gauge' or meter_type == 'delta'): # For delta and gauge, we don't want to increase the value # in time by random element. So we always set it back to # volume. total_volume = volume print('Added %d new samples for meter %s.' 
% (n, name)) def record_test_data(conn, *args, **kwargs): for data in make_test_data(*args, **kwargs): conn.record_metering_data(data) def get_parser(): parser = argparse.ArgumentParser( description='generate metering data', ) parser.add_argument( '--interval', default=10, type=int, help='The period between samples, in minutes.', ) parser.add_argument( '--start', default=31, help='Number of days to be stepped back from now or date in the past (' '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.', ) parser.add_argument( '--end', default=2, help='Number of days to be stepped forward from now or date in the ' 'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end ' 'range.', ) parser.add_argument( '--type', choices=('gauge', 'cumulative'), default='gauge', dest='meter_type', help='Counter type.', ) parser.add_argument( '--unit', default=None, help='Counter unit.', ) parser.add_argument( '--project', dest='project_id', help='Project id of owner.', ) parser.add_argument( '--user', dest='user_id', help='User id of owner.', ) parser.add_argument( '--random_min', help='The random min border of amount for added to given volume.', type=int, default=0, ) parser.add_argument( '--random_max', help='The random max border of amount for added to given volume.', type=int, default=0, ) parser.add_argument( '--resource', dest='resource_id', default=str(uuid.uuid4()), help='The resource id for the meter data.', ) parser.add_argument( '--counter', default='instance', dest='name', help='The counter name for the meter data.', ) parser.add_argument( '--volume', help='The amount to attach to the meter.', type=int, default=1, ) return parser def main(): cfg.CONF([], project='ceilometer') args = get_parser().parse_args() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) # Connect to the metering database conn = storage.get_connection_from_config(cfg.CONF) # Find the user and/or project for a real resource if not (args.user_id or args.project_id): for r in conn.get_resources(): if r.resource_id == args.resource_id: args.user_id = r.user_id args.project_id = r.project_id break # Compute the correct time span format = '%Y-%m-%dT%H:%M:%S' try: start = datetime.datetime.utcnow() - datetime.timedelta( days=int(args.start)) except ValueError: try: start = datetime.datetime.strptime(args.start, format) except ValueError: raise try: end = datetime.datetime.utcnow() + datetime.timedelta( days=int(args.end)) except ValueError: try: end = datetime.datetime.strptime(args.end, format) except ValueError: raise args.start = start args.end = end record_test_data(conn=conn, **args.__dict__) return 0 if __name__ == '__main__': main() ceilometer-6.0.0/tools/make_test_data.sh0000775000567000056710000000273712701406223021460 0ustar jenkinsjenkins00000000000000#!/bin/bash bindir=$(dirname $0) project_name="$1" if [ -z "$project_name" ] then project_name=demo fi if [ -z "$OS_USERNAME" ] then user=demo else user=$OS_USERNAME fi # Convert a possible project name to an id, if we have # keystone installed. if which keystone >/dev/null then project=$(keystone tenant-list | grep " $project_name " | cut -f2 -d'|' | cut -f2 -d' ') else # Assume they gave us the project id as argument. 
project="$project_name" fi if [ -z "$project" ] then echo "Could not determine project id for \"$project_name\"" 1>&2 exit 1 fi early1="2012-08-27T07:00:00" early2="2012-08-27T17:00:00" start="2012-08-28T00:00:00" middle1="2012-08-28T08:00:00" middle2="2012-08-28T18:00:00" middle3="2012-08-29T09:00:00" middle4="2012-08-29T19:00:00" end="2012-08-31T23:59:00" late1="2012-08-31T10:00:00" late2="2012-08-31T20:00:00" mkdata() { ${bindir}/make_test_data.py --project "$project" \ --user "$user" --start "$2" --end "$3" \ --resource "$1" --counter instance --volume 1 } dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2) echo $project for i in $(seq 0 $((${#dates[@]} - 2)) ) do iname=${dates[$i]} eval "ivalue=\$$iname" for j in $(seq $((i + 1)) $((${#dates[@]} - 1)) ) do jname=${dates[$j]} eval "jvalue=\$$jname" resource_id="${project_name}-$iname-$jname" echo "$resource_id" mkdata "$resource_id" "$ivalue" "$jvalue" [ $? -eq 0 ] || exit $? done echo done ceilometer-6.0.0/tools/ceilometer-test-event.py0000775000567000056710000000463012701406223022747 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool help you debug your event definitions. Feed it a list of test notifications in json format, and it will show you what events will be generated. """ import json import sys from oslo_config import cfg from stevedore import extension from ceilometer.event import converter from ceilometer import service cfg.CONF.register_cli_opts([ cfg.StrOpt('input-file', short='i', help='File to read test notifications from.' ' (Containing a json list of notifications.)' ' defaults to stdin.'), cfg.StrOpt('output-file', short='o', help='File to write results to. 
Defaults to stdout.'), ]) TYPES = {1: 'text', 2: 'int', 3: 'float', 4: 'datetime'} service.prepare_service() output_file = cfg.CONF.output_file input_file = cfg.CONF.input_file if output_file is None: out = sys.stdout else: out = open(output_file, 'w') if input_file is None: notifications = json.load(sys.stdin) else: with open(input_file, 'r') as f: notifications = json.load(f) out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file) out.write("Notifications tested: %s\n" % len(notifications)) event_converter = converter.setup_events( extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) for notification in notifications: event = event_converter.to_event(notification) if event is None: out.write("Dropped notification: %s\n" % notification['message_id']) continue out.write("Event: %s at %s\n" % (event.event_type, event.generated)) for trait in event.traits: dtype = TYPES[trait.dtype] out.write(" Trait: name: %s, type: %s, value: %s\n" % ( trait.name, dtype, trait.value)) ceilometer-6.0.0/tools/make_test_event_data.py0000775000567000056710000000733212701406223022673 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating event test data for Ceilometer. Usage: Generate testing data for e.g. for default time span source .tox/py27/bin/activate ./tools/make_test_event_data.py --event_types 3 """ import argparse import datetime import logging import random import sys import uuid from oslo_config import cfg from oslo_utils import timeutils from ceilometer.event.storage import models from ceilometer import storage def make_test_data(conn, start, end, interval, event_types): # Compute start and end timestamps for the new data. 
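    # NOTE: the integer passed as the second argument to models.Trait below
    # is the trait dtype; per the TYPES map in ceilometer-test-event.py
    # above, 1 is text, 2 is int, 3 is float and 4 is datetime.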
if isinstance(start, datetime.datetime): timestamp = start else: timestamp = timeutils.parse_strtime(start) if not isinstance(end, datetime.datetime): end = timeutils.parse_strtime(end) increment = datetime.timedelta(minutes=interval) print('Adding new events') n = 0 while timestamp <= end: data = [] for i in range(event_types): traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())), models.Trait('id2_%d' % i, 2, random.randint(1, 10)), models.Trait('id3_%d' % i, 3, random.random()), models.Trait('id4_%d' % i, 4, timestamp)] data.append(models.Event(str(uuid.uuid4()), 'event_type%d' % i, timestamp, traits, {})) n += 1 conn.record_events(data) timestamp = timestamp + increment print('Added %d new events' % n) def main(): cfg.CONF([], project='ceilometer') parser = argparse.ArgumentParser( description='generate event data', ) parser.add_argument( '--interval', default=10, type=int, help='The period between events, in minutes.', ) parser.add_argument( '--start', default=31, type=int, help='The number of days in the past to start timestamps.', ) parser.add_argument( '--end', default=2, type=int, help='The number of days into the future to continue timestamps.', ) parser.add_argument( '--event_types', default=3, type=int, help='The number of unique event_types.', ) args = parser.parse_args() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) # Connect to the event database conn = storage.get_connection_from_config(cfg.CONF, 'event') # Compute the correct time span start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start) end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end) make_test_data(conn=conn, start=start, end=end, interval=args.interval, event_types=args.event_types) if __name__ == '__main__': main() ceilometer-6.0.0/tools/show_data.py0000775000567000056710000000710112701406223020470 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg import six from ceilometer import storage def show_users(db, args): for u in sorted(db.get_users()): print(u) def show_resources(db, args): if args: users = args else: users = sorted(db.get_users()) for u in users: print(u) for resource in db.get_resources(user=u): print(' %(resource_id)s %(timestamp)s' % resource) for k, v in sorted(six.iteritems(resource['metadata'])): print(' %-10s : %s' % (k, v)) for meter in resource['meter']: totals = db.get_statistics(storage.SampleFilter( user=u, meter=meter['counter_name'], resource=resource['resource_id'], )) # FIXME(dhellmann): Need a way to tell whether to use # max() or sum() by meter name without hard-coding. 
if meter['counter_name'] in ['cpu', 'disk']: value = totals[0]['max'] else: value = totals[0]['sum'] print(' %s (%s): %s' % (meter['counter_name'], meter['counter_type'], value)) def show_total_resources(db, args): if args: users = args else: users = sorted(db.get_users()) for u in users: print(u) for meter in ['disk', 'cpu', 'instance']: stats = db.get_statistics(storage.SampleFilter( user=u, meter=meter, )) if meter in ['cpu', 'disk']: total = stats['max'] else: total = stats['sum'] print(' ', meter, total) def show_raw(db, args): fmt = ' %(timestamp)s %(counter_name)10s %(counter_volume)s' for u in sorted(db.get_users()): print(u) for resource in db.get_resources(user=u): print(' ', resource['resource_id']) for sample in db.get_samples(storage.SampleFilter( user=u, resource=resource['resource_id'], )): print(fmt % sample) def show_help(db, args): print('COMMANDS:') for name in sorted(COMMANDS.keys()): print(name) def show_projects(db, args): for u in sorted(db.get_projects()): print(u) COMMANDS = { 'users': show_users, 'projects': show_projects, 'help': show_help, 'resources': show_resources, 'total_resources': show_total_resources, 'raw': show_raw, } def main(argv): extra_args = cfg.CONF( sys.argv[1:], # NOTE(dhellmann): Read the configuration file(s) for the # ceilometer collector by default. default_config_files=['/etc/ceilometer/ceilometer.conf'], ) db = storage.get_connection_from_config(cfg.CONF) command = extra_args[0] if extra_args else 'help' COMMANDS[command](db, extra_args[1:]) if __name__ == '__main__': main(sys.argv) ceilometer-6.0.0/tools/lintstack.sh0000775000567000056710000000414412701406223020501 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright (c) 2012-2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Use lintstack.py to compare pylint errors. # We run pylint twice, once on HEAD, once on the code before the latest # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) # Get the current branch name. GITHEAD=`git rev-parse --abbrev-ref HEAD` if [[ "$GITHEAD" == "HEAD" ]]; then # In detached head mode, get revision number instead GITHEAD=`git rev-parse HEAD` echo "Currently we are at commit $GITHEAD" else echo "Currently we are at branch $GITHEAD" fi cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then # The HEAD is a Merge commit. Here, the patch to review is # HEAD^2, the master branch is at HEAD^1, and the patch was # written based on HEAD^2~1. PREV_COMMIT=`git rev-parse HEAD^2~1` git checkout HEAD~1 # The git merge is necessary for reviews with a series of patches. # If not, this is a no-op so won't hurt either. git merge $PREV_COMMIT else # The HEAD is not a merge commit. This won't happen on gerrit. # Most likely you are running against your own patch locally. 
# We assume the patch to examine is HEAD, and we compare it against # HEAD~1 git checkout HEAD~1 fi # First generate tools/pylint_exceptions from HEAD~1 $TOOLS_DIR/lintstack.head.py generate # Then use that as a reference to compare against HEAD git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions ceilometer-6.0.0/tools/send_test_data.py0000775000567000056710000001101612701406223021500 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for sending test data for Ceilometer via oslo.messaging. Usage: Send messages with samples generated by make_test_data source .tox/py27/bin/activate ./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering """ import argparse import datetime import functools import json import random import uuid import make_test_data from oslo_config import cfg import oslo_messaging from six import moves from ceilometer import messaging from ceilometer.publisher import utils from ceilometer import service def send_batch_notifier(notifier, topic, batch): notifier.sample({}, event_type=topic, payload=batch) def get_notifier(config_file): service.prepare_service(argv=['/', '--config-file', config_file]) return oslo_messaging.Notifier( messaging.get_transport(), driver='messagingv2', publisher_id='telemetry.publisher.test', topic='metering', ) def generate_data(send_batch, make_data_args, samples_count, batch_size, resources_count, topic): make_data_args.interval = 1 make_data_args.start = (datetime.datetime.utcnow() - datetime.timedelta(minutes=samples_count)) make_data_args.end = datetime.datetime.utcnow() make_data_args.resource_id = None resources_list = [str(uuid.uuid4()) for _ in moves.xrange(resources_count)] resource_samples = {resource: 0 for resource in resources_list} batch = [] count = 0 for sample in make_test_data.make_test_data(**make_data_args.__dict__): count += 1 resource = resources_list[random.randint(0, len(resources_list) - 1)] resource_samples[resource] += 1 sample['resource_id'] = resource # need to change the timestamp from datetime.datetime type to iso # format (unicode type), because collector will change iso format # timestamp to datetime.datetime type before recording to db. 
sample['timestamp'] = sample['timestamp'].isoformat() # need to recalculate signature because of the resource_id change sig = utils.compute_signature(sample, cfg.CONF.publisher.telemetry_secret) sample['message_signature'] = sig batch.append(sample) if len(batch) == batch_size: send_batch(topic, batch) batch = [] if count == samples_count: send_batch(topic, batch) return resource_samples send_batch(topic, batch) return resource_samples def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( '--batch-size', dest='batch_size', type=int, default=100 ) parser.add_argument( '--config-file', default='/etc/ceilometer/ceilometer.conf' ) parser.add_argument( '--topic', default='perfmetering' ) parser.add_argument( '--samples-count', dest='samples_count', type=int, default=1000 ) parser.add_argument( '--resources-count', dest='resources_count', type=int, default=100 ) parser.add_argument( '--result-directory', dest='result_dir', default='/tmp' ) return parser def main(): args = get_parser().parse_known_args()[0] make_data_args = make_test_data.get_parser().parse_known_args()[0] notifier = get_notifier(args.config_file) send_batch = functools.partial(send_batch_notifier, notifier) result_dir = args.result_dir del args.notify del args.config_file del args.result_dir resource_writes = generate_data(send_batch, make_data_args, **args.__dict__) result_file = "%s/sample-by-resource-%s" % (result_dir, random.getrandbits(32)) with open(result_file, 'w') as f: f.write(json.dumps(resource_writes)) return result_file if __name__ == '__main__': main() ceilometer-6.0.0/tools/test_hbase_table_utils.py0000775000567000056710000000254712701406223023240 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
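# NOTE: this helper reads CEILOMETER_TEST_STORAGE_URL and, when it points at
# HBase, upgrades or clears both the metering and the event tables (see
# main() below). Example invocation, with a hypothetical host and port:
#
#     CEILOMETER_TEST_STORAGE_URL=hbase://hbase-host:9090 \
#         python tools/test_hbase_table_utils.py --upgrade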
import os
import sys

from oslo_config import cfg

from ceilometer import storage


def main(argv):
    cfg.CONF([], project='ceilometer')
    if os.getenv("CEILOMETER_TEST_STORAGE_URL", "").startswith("hbase://"):
        url = ("%s?table_prefix=%s"
               % (os.getenv("CEILOMETER_TEST_STORAGE_URL"),
                  os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test")))
        conn = storage.get_connection(url, 'ceilometer.metering.storage')
        event_conn = storage.get_connection(url, 'ceilometer.event.storage')
        for arg in argv:
            if arg == "--upgrade":
                conn.upgrade()
                event_conn.upgrade()
            if arg == "--clear":
                conn.clear()
                event_conn.clear()


if __name__ == '__main__':
    main(sys.argv[1:])
ceilometer-6.0.0/run-functional-tests.sh0000775000567000056710000000035512701406223021451 0ustar jenkinsjenkins00000000000000
#!/bin/bash -x

set -e

# Use a mongodb backend by default
if [ -z $CEILOMETER_TEST_BACKEND ]; then
    CEILOMETER_TEST_BACKEND="mongodb"
fi

for backend in $CEILOMETER_TEST_BACKEND; do
    overtest $backend ./tools/pretty_tox.sh $*
done
ceilometer-6.0.0/doc/0000775000567000056710000000000012701406364015556 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/doc/source/0000775000567000056710000000000012701406364017056 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/doc/source/2-1-collection-notification.png0000664000567000056710000010077612701406223024705 0ustar jenkinsjenkins00000000000000
[binary image data for 2-1-collection-notification.png omitted; not representable as text]
ţÔÔTÇçfffě“ßď÷ďܹӟ踽$˙Îť;­mó›ß8nSTTd˝·$BBBŔ{|őŐWţěělkű„„„€çoŢĽŮő;***ňŻ^˝:ěűÚ™v ţ>$ůçÍ›çx=üđĂŞ««SVV–>ýôSů|>ůý~íÜąSRׂqćóG•••ÚĽył6oŢlíźŮ7“%lNqq±222ôé§źĘëőĘď÷ëŕÁJHHĐ®]»´k×.®|CXŘŕnp ÝÔÔTeeeÉçóiË–-Ű:,ëęęôĆo(!!!d!¶ĚĚLë5Ţxă Ç×r*C`ö)ř}$çÚ¶NŰżüňËňů|š;w®¶lŮb=ÇľOö’ö «WŻxŹ%K–XmÜ•ş‚µćßÁźĂéó­^˝ZŤŤŤVŔŰ^ćaŢĽyVů†ŕv7źďÓO?UccŁ–,Ybíź Ć3ďYłfĽOvv¶U6"8 `h‰*s×5ò˛˛€mť‚¨&(şdÉÇ:ĽŮŮŮJHHLt«Ýëőz­ŚáŕLÜh·ollT]]ťBĄf»˘˘˘€ŔčÁŐĐĐŕ,=ţĽkŰedd„üÍ,RgöÉÔ#–şŻNm%ÉŞE,uÍűlٲ%¤˛ů›icĂßx㍠îęŐ«uđŕA×Ď `hp]PÍôłMgÍšĄ””y˝^íÚµKóćÍsĚRőz˝Ök„ fffZÁN#8c8xź‚K?8•^pŰŢ”5kVH–ŻÔ•ťl/ÉĽź^Ż7 C·®®Îú·S6±SP;8s×t322Ş‹ô: Ž‹Ĺą˝çÍ›§-[¶čüůózřᇕťť­˘˘"egg[‹ĘÚ\3wÝ©Rhö®=Čh‚“öĚU§ ŞśőjĎF Jş•kp ;m®ä›ââb}ă߰ʬ^˝Z«WŻÖćÍ›őé§źJ - )ŕlßŢ<6kÖ,×}0űť’’ňZnźĹí‰jllÔÜąs­í~řa«ć/€ˇĎ5¸ë`•ş2?M]{†®S©„p]{¬ @ÚÁĎu[,Rm[űö&ě–!lÉ’%Z˝zµü~ż^zé%«Dßď—Ďçł2f{pţ ‘ö)xq;űcnÁ]§RFbb˘víÚĄO?ýT/˝ô’ő}>üđĂő† M®ÁÝHA“eZQQŃ«lX©»DÂĚ™3­ÇÂeŁת•zVŰV \ô,ĎçSqq±Š‹‹­ ±©…[WW§-[¶(;;;ŕőL Üŕ÷pŞ lß_§v5Ďqb˛¤íŮ˝áľ#§÷ňů|*++ XŔ.55U[¶l‘×ëµÚz»ŔĐçÜ — kş´eeeV6¬=Čhţßi±1ó&ČhŻ©$˝ôä^ţŔm{Ŕt ¤VTTX Š%&&‚ť¨uuuŽűks*iá”ŃköĎ-đlč Vp7Ňwd/˙`/•1oŢ<Çŕmbb˘µŔ\¸ 3€ˇÁ1¸kÁQ»ÔÔTeee©±±Ń1Ŕ9kÖ,%$$ČëőZACĂçóéᇖĎçSVVVT٨‘‚¸ŃnoˇöěUó:/żü˛¤îŔµSŔÖţď'žx"äuť¶µn·¶2űäô>óçĎ·öË|ćó9e,»µKvv¶$)ä; ×f†žŰ˙˙öî>*Ş;Ď÷ý'ť´ŽX%ś}­@´lő„‚éuî‰â™6̸ ŃčIîş±čhΚé$Ŕťľ­=‰c§[ĎqZéž5m‹™u“Ą#Ü#FŔłfşˇđš EޱröR%Mr˝”{[EíâI@ ߯µzĄŞöÓo˙jÓ1ż|VÔ: \YY™Ůú ĽBÔPYY©’’•——«¶¶Vůůůęéé‘ÇăQOOʞ˛˛TSSq̵k×$I%%%ĘĚĚÔćÍ›•źźłőqýˇ,¦&…RŹÇcö—ÍĎĎ×ŮłgÍ{÷î5ŹIJJRQQ‘jkkµlŮ2ąÝns1˛ššs[,mmmZ¶l™˛˛˛TYYs‘ąââb­]»VŐŐŐ× ×ćÍ›#*nűŽbm˙NŽ;fnohhPCCÍpŔÄeîÔ÷6\qq±222ôÉ'źÄ\x-33Seeefx(…‚ŕţaĄˇ˘˘Beeećbk;wî”dÝÎ` Ţ¶±4KJJRCCĺ*++#ŞĄP_ಲ2UWW›Ő®Ú»wŻÂöööĘëőšcp»ÝjhhбcÇÔĐĐ`¶g¨2Öăń(33SŹ'˘ŞvéŇĄŞ¨¨şŹÁŞlc…ŢĆwRQQˇšššp˝¨¨H•••C^lŔ­sÇ5ŁTvŚ- \.WDŐę@űeżŃÓP‚khhPffć°Âϑ܇lwş‘j•¶$ýć7żŃoű[IŇĽ˙©‹f¸˙‡'>Ö™˙~V’ô—ů—Z±b?qŔ(ąkĽ.”””4¬°r¬Ý‘Śi$aëHîc¸ňÍ ż.źľÁ@ü!Ü€8D¸ qpâá.Ä!Â]C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@ş‹)@}}}Úżż.\¸ńy˙÷ĂqâÄ }ôŃGźĄ¤¤hĹŠJIIaŇ€a"ÜE”ßýîw:qâĨžóÂ… –áđ·żýmĺćć2éŔ0Ń–QÇ ű|ËyϨěCŐ.02w\»víÓ€ţš››U]]mľżkĘ]úł‚§oNąS‰÷ŘőÍ©C+úţňĘWęý, /Ż~­?Ô˙żúęęWć¶µk×Rµ Ś•»°”››«µkךú•>lţxXÁ®$}sę]JĽÇ®›?&ŘFá.bęđ~~>¨ăű[ô啯†|Ž/Ż|Ąăű[ôůů ůÁ.pów1 › x v€±C¸‹AŤ$ŕ%ŘĆá.†d8/Á.0öw1dC x v€ńqǵk×®1 ŽććfUWW›ďgĚ´éˇŐ9’D° ŚÂ]ŚUŔ+‰`'„»±ţo8‚]`lŃs#ÖżŻ`{„»¸)ý^‚]`|Đ–ŁÂď÷+!!A)))L0w Ń–âá.ġ»‚řä÷űőĹ_0âBJJ =ąe„»qhßľ}zď˝÷q套^’Ăá`"%´eC~żźIwĽ^/“Ŕ(˘r7ÎMëűBßřú+&Ŕ„tuĘ}9u*Ŕ ÜŤs˙‹ß/[0ČD:ÓÓŐý­t&€1@[C„»‡w î@"Ü€8D¸ qpâá.Ä!Â]C„»‡w î@ş‹) ŐŐ©SŐ›8CA»]_ßyç ű_™2Ĺ|}âÄ }ôŃGL"& ‡ĂˇŐ«W3âá.`P_L›¦˙饠Ý>âs\¸pA.\`21a|ôŃGZ´h‘“ .îô?gÍŇSďa"0)őőő1 âá.ŔŇ×wŢ©s™ęMJŠřÜ~O†ÓďÓTŰÝšjKb˘wÎţKťú.u1âá.ŔҧłfE»S¦'Ę™÷¸Óîcr×îüćT&Ŕ¤@¸ Ň™ž®K3SĚ÷ió˙Ů˙QwMů&€ ‚páęÔ©ęţVşůţnÇ|Ý÷ż>ĘÄ0Á|)„ëJO3_ß9ĺOäĚ{śI`"ÜDčIL4_Ď[ö$­ w¦Ţ¤$ýw…:öÜ9ĺOX< € Śp`ę›6Í|ť”Ę„0± ŔRbúÄŻÚ­}cÇ€Űl‰š—˝H÷νÔŻą¸pŤf¦;†tLăˇ}şĐĺ×`bëëëÓ{ď˝§””-Z´ ’waľH¸Qą;=ů[z¬çÚO©fĎŽ!í;?'WĎ˙dŻě‰7÷Ö^óšËźX?äăjßءóť~mŘőŽůYËűuŞŮłCŮŚ8Üí ôęÍŞMĂĎáý»u¦µYĎ˙t/=ĉßüć7zď˝÷$IgÎś‘ŰífRá.ŕ†ŻďĽÓ|}甩z¬çÚ?$MłÍĐ ?őDl;ßĺ×ůNżú‚˝:˛·N·4«ćŤzŞtë¨\3%Í1ä ¸/Đ«óť~IЍ’=ÝÚ,I7UU<’ńHŇ™Q¸6`|ůý~óő‰'$‰€îâÓąöS’Bĺ@­f¦9ôÖk/«©n˙M‡»F ;?;wŘÇ8ś #>a›ç¦çŕF@ĽpXÇímęâ€8GŔ $TÄ)Łju° Ő~Ă+hG~M#P^8ěqŽE•¬1žá„Í€ÉăĉňxťďôëŢą÷+ďŃ5Zľ:şď­Uu­qýţç—B ¤5Úg·3ÓĘy¨PEßű›¨Ö áă9×~JµoüťZŢŻ3?{ň…W˘ŞšköěĐoł®1{ýöOËűuŞ}ăďĚ1ä=şFOľđŠeë‡Ó-ÍjŞŰ§–ăőę ôjfşCOľđŠr–š×zň…WhŁ(áî4ő] ýĽÜިÜÄťÓ-ÍćëÁŞhí“ŮátKłN·4«/Řk}ţÖĐöŰŤ0ó|§ß¬ü}ëµ—Ułg‡RŇr8ę\ű)˝YµÉ\Ü,\xkhyż>tý°ŕY’vý¨DoVmŇĺ@ݞ*ĐĽě\]ôęđţ_jűó+Ő¸1ŢđJäó]~mvWź|tJó˛s•’ćĐąöSÚő·%QŐĘ­ÇCמ™ćšŹ[˘öĽZŞ]?*Ńĺ@Żćeçjšm†íÓ®ż-±śŰíĎ?®ĆCű”’:Ëﮕ¨ńĐ>5Ő틺ŕćeţűBýé—ůž ^n_TîâŽQQ:ŘBbFŔ(IŻyÖü|°ĹĚn´RXuMIşvíšţëŰ˙jV7Ú§=Ż–ęČţÝzxőłćçç;ýf ^AkŐŢátKłZŢŻ“ĂąPŻT˙ÎüĽ/Đ«mĎ=®sí§ÔxhźyĆ=HŇ›U›ôĚ‹Uf%®$˝ĽöĎĺ÷} –÷ë"îÝęÚĆýůőnĄ¤ÎŇĎ3çć\ű)mv7"P—BŐÍ{^-5´ ońÚŹJôÖk/«/Đ;ěĹŢCăĚ{\’ôÇŻ$*x¸]Qą ;F9ÝžŞ˛í÷ż#űwk׏J´çŐRIŇĽěÜŕÓ¨¦ŤŐŇáŚEřk\sšm†^Řć1\)Ô¶Ŕ¨ 
6Ú"„Ž9e^?âüŢĐ€‡ľćbmýÚ($ŘÍp¶ĺx}Ôx$é…źz"îĎ8N’ú‚7ŞŤ€¶ŕjĚÇ´é3´ńőw"îŰ*ď ôjĎOĘ$Ië^¬Šs‚=Që^¬2Cíá.ö:gŢăTđp›Łr@˛Ó©TW–¦Řl vu«»­MÁÎN&¸ĹŚ0ň\ű)mîń÷]\¸FO•ľbľog`<č4ŰŚ×_Ă+sĂĺ,)”ß÷AdjŃo7Ľš7<85Îyd˙nM·'jůëÍ6ďŃ5Qá­qîĹ…k˘aIşĐşÇđ;Ö‚pĆçĹĎü ŞĘÖŘ6Í6Ăü¬ńĐ>őzĺp.TΒ¨kĎLw(%͡ ]~zíŔŁ‚€Űá.FŐ˙ŢpT’t%Сg˙ó‚ĐÝneą×J’ţ)٨Ź)ŮéÔŐË—ŁĆňpUĄRł˛ÔÝ֦åeۦÚíúîÎź)ŮéŚřüHYąr7nyÜ­´`ŐJ}řöĎŇ\.-ŻÜiŽ˝Ëëĺ!ŤŽĽ<ů™ú˝fŕčp.´ü•˙ůŮąš™î0˙ΨxŤu¬qîţU˝F Ü?dČŤö÷Gťż5oŢŁkĚĹÉjöěĐáý»őđęő!ŻŐxr–XΑU€m5žđ0ŰęެŽ1*š‹éöD]čňşŕŕćđpű"ÜĹj·+wĂu¸¬ü–Žá§źÖ‚'VéHYů°*n˙ěűě^ňuhzZŞ‚ÝÝn®“ťN-Ú¸AÉNgT¸‹řbKOWî†*Ő哿č,ÂŰ„÷¦Şˇ÷ŰŤ dŤJU«Ş]cźţÎXTîZőó5¬{)Ôâ fĎ]čňG„ĽEßűą_xőŻUĺ¬qŤţíÎxŻ·~ȶh#„µ »­‰‹y\Nü…»˙÷// Îđp{"ÜĹIuą´ŕ‰Uúđ×oß’ëß=gŽ<±*ćö‹íľ†›µx±$)ŘŐĄCëźŐ•@`HÇÝ łňň˘*Ś WAu·µ™Ż1±Í~䥺\LÄ Ś0Ňčq;Ňăcő‚mm UĄF¶3…™- föŃ˝~\xŘQ);H¸l´`hyżN‡÷ďÖ™ÖfŐě١iÓgD-¦6/F k؆WóZ/îf=ž3­'˘ćË–c1úŹô;ş•ü~?Á.€¸EŔ Ŕí‡pcę§ź–ż±iBö©ýýëŻÇÜ6Őn—$uÔżěvÜDsŃç›P­#€Ń0X9Ł­ŐńF/ŮĐö…Q׌Ąĺý:őz5Í6Ă NŤ@8j1µ~áňůNżŽěß­sľSÚ°ësżś%…ĘYR¨_ý¸TMuűÔrĽŢ wcőÎŤžŁčŠáţë`‹ËŤdľŹüz÷M}G·R__?d&´„»Ó4=9=ćv^n/„»—|şŰ9gB´g0ąaäÝߪzU UŁÖľ±CRt;#őű>P_ 7Ş‚·öŤż“ZlíĆ1Ńí Â'3‚Ďľ`Żď˙Ą9†ţçÎ{tŤšęö™ăO¬đÔŞm‚UĹp¬j^ó‹Ëe?T Öăőj<´/ę¸ĆCűĚăb…ĎńÂ~O†î/|†:q‡€€Űá.ĆÄąĆF»»äXĽxTÚ3LµŰ5ű‘‡ĺČË3? tvéł¶6uÔ×Gíźĺ^«„Ô4óý}Ź<˘TW–$©ÍSúCoaˇ¦§ŢŁËÝźÉWW'[zşć<ňpÄyŇ\YşăúboďV°ł3ę8+ÉN§ć­\){úŤ1řőń»‡Ł*ĂÍ)(Đ=YYÇ] ŐíőF›ćr)Ő•eŢ—qß’ÔímS—×qOĆřcÍmŞËĄ)6›yMc“ĺ܆_;|îć?ţ¸’ç:‡użIv:ĺČ[ŐŞŕb»Oź65 ş@Üś‚9ň›÷u±Ý§ÓďĽń=se5/ł/Ž8~$ó2çúÜ4/7ö‹ý]FďëŠxFüŤMú´©é¦ć;śďô[.6-ď×™ýjO·4ë­×^Ö´é3,Ď}¦µY)iő{őÖk/뙫$…ÂŃ=?)ÓąöSr8Şř™„ÝÎŔ*”˝wîýJIsčB—?âÜĆů›ęöIŠ ‰Ď ĐZ"|ެŰ/ ^ÍŰ˙ů‹żµŻWSÝ>ýé·ZţÄzťďň«őx˝jöě¸7Ŕ­AŔ ŔípcćĶíJ}ëMM±ŮnŞ=Ă‚U+őŔÚµf«CŞKrhţŞ•:±m».únôŔ}°ßZť…7V“7ÂÝŮŹ(5+KÝmmˇ .55ę¸T—Ë Ńş˝m vvFnŞÝ®E~B‡źëµkŐň‹żŹ:.ŮéÔŇ­[e uĂ9ňňôŔÚµúoĺ˙‡yź÷¸\zđzŘ˙ľŰ<Őˇp7잌ńGÎKˇrţúŻ˘ćÖ¸ćk×ęئMs~mŁźŻŐ9Śű óP 4ŹĆą<±JľşzťŘľÝňřďîüYT/bă¸Ű˙‹ů=s5Ôďc¨óěěÔ’­Ż i^¬ž˝ţߥń}-ÚđĂĎČ•@`DóObő±Łęt׏J4?'W}Ďu®ý”–Ż^Ż[˘üľ,«mďť»P9K µçŐRµŻ×˝sšŞçBm|ý˱ö_MŠK_ضWŰž{\Ť‡ö™çí˙ąŰSĄŻDś#Ľú×ęşý[Ł'đpú˙ĆŞÎYR¨ĺ«×ëČţݪٳà t§ŮfźKńąL&ĽL~„»3W5oŰ®üoq{gaˇľóÜs’BŐ‰'««u©Ý§oÚlrä-Öś‚%;ťúîÎźéĐł˙Ů /Ű<Ő˛ĄĄjNA(Ôí¨ŻW°«{Ŕk»»Íŕ׬šlkSW«×Ü>đ@ŃßÔ¤Žşz} Ę–ž®Ö>-[Zšmřˇ‚ťťf`gKO×_üj·yŹőďęÓĆFIŇÝsťräĺ)5+zK IDATKSív-ÝşUź|R’ô™×«6Ź”–íRjVdUňgCX (<(ěÝYyyZ°jĄléiúîΟŠ§ß“j»ľ}@—»»$)ć‡*<Řő75éÓë1đM›M©.—ć<˘)6›ś…úăÉ“aůT»]Eoţ_a}“ëőYŰIIRŇś9Z°jĄmřaĚ Wăy2Ž˙đíęözőe0¨{\®!Ď‹ěvÔ×ëRGÇ€ób<{}—i.—ů}u·µéăúwÍçÝ—j×>őżMÚ Ţľŕçš—ť{SU»ë^¬RÍ;Ôň~˝>ůč”ćgçęÉ^Ńüś\˝YµIó˛s#BÉóť~ÍËÎUÎ’Bĺ=şF ¶ŞŮłC§[BŐĽyŹ®ŃĂ«×÷kăpĘěµŢÎ@ őŕíîŢ;÷~íxű_ux˙nťnmÖ'ťŇ×ď5硳׮$őžľ°ănÓk†˝á÷Ö썺ßp ö×ď˝ jŰSĄ[•óPˇZŽ×é\{(7‚3îpëđ0ąîbLůĺojQ{[zşf]ňučHyyD`ĺolÔÇőďjyĺΨđř¤ÇŁ4—Ë w?®wĐ_ávvę¤Ç#éF¸ŰŐę5?Ě‚'V™ÁîďţóČűôzĺolTńőJć?{îűúçuë%Ißyîű’B둲ň°°ËëŐ‡ż~[ą7hNAléiJv:uŃçS—×{ýžÜf 8Ô±NµŰ•ó׺ď®.Z˙lÄÜvy˝ú¸ľŢśŰX­-=MWA|ň©ŞŕţcväĺÉ=8L¨C(ŘýđíQ Ř…ľ÷z3ź•·8"Ü Żň>±ýżDUIw{˝ć_8XYş5´-Ö÷ńicŁ9/ßyîű–aaĚË?Ż[q|¬yąńěĹţ.g36‹őŢĘ˝säŤuţđq…·ue~N®6ćĽ3¬9J°'{<±¶źďô›őüśč`بžO¸ /“×7ڵ۶ëj0(Izŕé§Ł~U>–ů+WšŻ›·mł¬DěňzÍ ÇT—kČç ł …o—|–ö•@@'«CcMv:#ÂĹPĎÔĆżN˙qý»ćkŁ˙ëMŤµŕ‘č¶í–s{Ńç3Çk‘Vţđó_X¶Ű8ýöóőÝĂř^’çÎU°+Tü˙\żľŐŘ.ů:,çc¶Ńc¸ľŢ2ŕô76šĎLŽĽ<łĂÉęjËďă˘Ď§?üü>s±Ž?YýŹ#š[zşů¬X}_ľş:µyŞ-ŰLŁ©ńĐ>•,NÓ¶çŹÚvľ3ÔwWŇM‡đ€ŃĺĚ{\:çFĎţ'NČ3ÄÂ0qîbĚí¤ë˝T7nŇqĆâR—|öýřÝÁç¬äXłĄ§›!_Ç»ő±ÇZ˙®>ů”ţ)™Đ5Ľř’öýĺ sŽĆÔ»ş ĂCĺţ‹šb?ŇľŻľş:üOOFĚ‘•«—–÷e„Öác虉|ć\–÷Ţ_xr¬g®»Őz^FŇwZ -'…ţb ˙Ç[-C哏Nz<#ľ0FŐ®ß÷ZŽ×«/Đ«sí§ÔrĽ^ŰźľËWŻŹjG¸őx|hË€qŢž!ŮéÔn÷ -Śđj(í iŮ.ťĽ>µĄ¦šŻ/µÇ5ŻöBµĄ§kú=÷(y®SS®÷Ť¬Ž”ń«˙F/ŘĆ{Éס»ťs”<×9čüŹ•Ô¬,M±Ů”<ש»ťNÝ=ÇiąŘYx%ě@÷kĚĆ=^ 4ĺăC[¬gn´5;}ŕ€ŮkŘ‘—'G^ž‚ť]f«ĎNžś´}v1±ä=şF§[šŐT·O»6şŁ¶g?T âďý€‰€ ęV¶hđz˝ęí퍹=++KIIIă6ŽÄÄDą†ůçěžžµ]_T¸˙xŹ;&IĘČČPffć„'`r!ÜŸ9±m»RŻ÷śÍrŻŐ§´!g´t@Ć+[zşxúi9ňÇě;.¶aţ/oÉś,XµRłŻ/77tNµŰőŕëAěěÔ‘˛rĺnÜh>ë¶ô49Ó ä, ő–ö76ꤧzÔe żu/Uéá5ëŐň~˝Îµź’ę3śł¤@÷νź € Î*ŕMIIŃŠ+Ćôşeeef:—ËĄââb•––Žz0iŚcéŇĄjhhÖ±^ŻWË–-“$=zTůůůć6ăőćÍ›UQQ1aÇ \w1nŚö ů?-ĐłhăsQ±›őÍéÓăv^yyćś.ů:ěîŇĹvź>»^ąĽĽrçmńśLµŰőÝť?‹uŻşÔѡ‹í>őtt¨ËëUîĆ fňh vu©c€¶ áúş»Çmn.ú|úíşuJv:5» @i.WÄ_j˝ 
/mňvŔHÝ;÷~‚\c3Ňî3Ă]Işpᄟ×ë•×ëUeeĄŽ=:ěĘUn„»WVícKK|źëż˘ßŐ:ń’šj·›UĄSív-ÚđCIˇó?˙…ĺ"`iŁü‡Ů«Á ¦ŘlCjc1ýžÔqťźy+WŢč_\_Ż“Ő˙8äÖ_†U';ť#®`˝ Ú6äVşčóéâ믛Ď#/Oł 1Ăîď|˙ű„» ¦Ď|­ęh:hľź5k–VŻ^=®c8zôhĚm^ŻWŹGmmmęééŃcŹ=¦ÖÖÖ ßZ`éŇĄ’4*-nVRR’9Z2ŔäF¸‹q׿=ż©Ér?ŁŐBjÖŔÁfx…ç—·¨…@x×Yyy1űŰŇÓőŘ[oJ’^Ú¤©v»Ů†!V°k7šşŰÚäXĽXIłg¸ź-=Ý Î‡ŇÂa4˝n]].2g:‡/b–šíŠî:b,‚v±Ý§Ô¬,%;ť!Ľ•,÷ZuµzőĺĺËcŢaŞÝ®¤Ůł•<שKľŽçëJ _]ť|uuĘőÇr,^lŮŹ@˛v˙ćoţF ă:ŽÚäç竬¬LĹĹĹŞ­­ŐŮłgUSS3.}oĆp['Ś%—Ë5ˇĆ;ß` 0ŢŚö ÇâĹ–űť»^yhKO°rőA÷Z󵿱é–ÝÓ%_Ç€÷#IłyÄ|}©ŁC a ± Tť:ű‘‡Íףô~z}ž¦ÚíZđÄŞűÍ_ąŇ|ýq}ý¸ĚĄv_ ÝAšËe`^ôůĚďaţă+cö/f"çĺFµëĽ°{ďĎYX¨Ýn=\U©äąsÇĺůz¸ŞRßyî9Í.x$ć~ăŔ€ř4Q‚ÝˇŞ¬¬4_{&đoUp+îâ–0Ú3 äĚćbjK¶ľbą°Ö‚U+Í*ĚŽúú€4ĽşqVŚJÍŃÔvýś¶ô4-Ú°!j{˛ÓiVĄv·µ)ŘŮiöÓhŚ‹6lPjX¸÷7ÔĹŘ|uu vuI’xúiËđÜYX¨«VšăŻEşşŻŻę›4{¶eťětjÉÖWĚ÷ýű-˙ţz»[zZTďŢ©v»ţâW»c.ŇÖĺőš×ĎrŻU–Eśět*çŻ˙JR¨}C¬j둯<ď˙ťt\×çXVOµŰ#ž/€pńěJ±Ű444hË–-Ú˛eË€Çű VÁZSSŁÇ{LË–-Ó˛eËT^^®łgĎ{Ľ]ݧ§GŐŐŐ×Y¶l™¶lŮ2äk544¨¤¤Ä<ö±ÇSuuµĺľgĎž5ÇÓ˙üý?ď?®’’’!UýÖÖÖĆ<Îăń iî7ʶ ¸eÂŰ3XąčŘK›´ôÇ[ÍPÎßبK×CĆYyyfHwÉס?üüQç0zË.XµŇüuőĘ_6&÷ăolTG}˝ćČYZěĘßب//u·ÓirWAłrąËë5ŰO,XµRÉÎ9ęözěęÖÝsćČ‘—'[zšąŹ•đ@pőoţo] túŔ;öŚ=öŇ&-ŻÜ©©v»–WîŚ9·Á®.{iÓ¸=׿«Ô¬,MµŰőč/˙AgP°«[¶´Ôy4ć¤PŰĺőę÷?˙ąľóýď+ŮéÔ_üj·®ú2x٬ö5ľ'+Í۶ë/v˙RSl6=čvkV^žş[˝–ß㑲ňQ˝÷Ka•·Ë+w*ŘŮĄî6Żš·m×Éę”#/OSl6ĺ˙x«.ú|fĄqBjšy‹Íp˙÷»^ç˙`€)Ýá®$mŢĽ9ć~ć>±Ú@”””DU744¨˛˛ReeeÚąsč‹Çşž×ëŐcŹ=fâ644¨˘˘bĐk•——GT2jjjĚEçÂ{ëž={ÖĎŇĄK#‚ră󬬬A®Çă‘ŰíÖŢ˝{ٶőôôhٲeňZ´˘óx<*++Skk«Ž;6ŕÜF•»¸eú·g°ŇĺőęHYąYeęČËÓn·t»ÍPďĂ·čHyąeÔđŔ×öF»m¸ćmŰŐć©ÖŐ`P¶ô4-xb•t»#É#eĺĆÍ۶™÷—ęréA·[ą7hÁ«4ĹnS›§Zż]·Î¬ĆtäE¶}ř¸ţ]łŞŕLžët¬}>)+;oôÜvÔ×ëĐúgě=;Ú|uuúđíć˝óaĚcw[›ţyÝzłRZŠ®rýđ×o«áĄMćĽNµŰeKOÓŐ`Pmžęź»`g§jž|Ęś—d§3ę{ vuéHYů¨W3wy˝f…®ńĚĎk°łSGĘĘÍďÚXđA·[ÎÂMµŰÇl\¸ţ‹ÓT˛8M}ŢqżöůNżyýpŻmt«dqšďű%_ŔR<»555ćë±X¤ěرcňx<ĘČČĐÎť;uôčQíÜąS’Bm!Ś ô¦ţ QR˘łgĎ*11Q›7oÖŃŁGuôčQíÝ»7âZ±Ş\Ź;¦ĘĘJeddÇoŢĽŮ<Öëőެ¬lŘă*//WCC˛˛˛´yóf$Ý;÷~ľ$@”xvĎž=«’’óýX-¦–••Ą††łę5??_n·[ůůůjkkÓ–-[äv»G.744AhMMMTkqq±\.—>ůäUVVƬrµgYY™ylmmí渴´4˘"¸¸¸Xn·[ŮŮŮ’nTđ*++ÍűŮĽysDřm›źźŻŢŢ^~`śîbTŤ$\ę1]# Ż‚ťťúđ×oG}>Puă@×JUä•@`Ř˝XŤ0r űhŃ5«ůą iÎ.ú|ĂŞöl,7ó< ö˝ vţ«Vęb»O=¬+€ĺ}}3¬Č—1Bú[9/]w¸ăÂÍK°ĎPŃ÷~ ™éŽ[rýsíÖ!îâÂ5’¤ů9ą|I€=ب_®×덨Ú-**ł_íŻ©©‰hg IIIIŞ©©Ń}÷Ýgî3’ĘX)ÔÂŔ`'%%™é@÷kśn·[[¶lQOOŹĽ^Ż\,DÝ_FF†e«—ËĄ˘˘"ŐÖÖFUŕVUUI µz°Şjvą\ެ¬Śćc‹pŔ¤ňťçž“j×a,®Öź±PÜŐ`jW É˝s¥ձ§[›ŻŹ#˛r·ř™đ匡ľKÝú ţ &Ŕ„’ś±@é  ¸O{ö¬Ů7x Jj·ŰM¸ ăpŔ¤âoj’cńb-XµR—|ľ~=lÓőŔÓO+őzEŃŰ·Źľ@ŻÎů>P‚m†îť{żÎwúŐT·O’4/;Wół­+`ÍpŐąĐlËpľÓŻó]~ÍLshfşC-Çëĺo?Ą”4‡ćgçZĺ{®ý”δžP_°W ¶De?TóŁýBx…n˙ë[Ýź$s\R¨Ňw qťďôëtkł.tů%IŮÜÖ-ľţňßôy÷Y~pL(źwźŐŚÔű4=9Ír{Ľ/ž–(—Ë%—ËĄâââ1]Śk°s»\.µµµÝT˙X—ËĄĄK—ęرcňz˝fkÜ[QQŃ -úW쎖ᶚ_n°cŤ`0öwL*'÷z”š•Ą)6›r7nPîĆ şčóizjަÚíć~ţ¦&ťŁ*L\-ÇëµçŐR-_˝^whżďŹ\,gIˇžůŰĘľş}^mîqIŇަ.óó_˝ZŞ3­Ízţ§{µç'Ąf_^Ă3/V)ďŃ5Qc8×~Jo˝örÔţoVmŇSĄ[µ|őúĎĎwúÍEÜÂVăúĎĽXĄĽôĐuN·6k׏J´¸pŤ^ł^{^-3űőJRÍž1ÇőVŐËQóQłg‡ćçäęůźě˝%˝†o‡ĂÁ € ď««_X~OÁîµk×nů MGk7Łň·şş:â3ăs—ËĄ˛˛2­]»vB?wŢaüĆŰXŇ€h„»&•‹>źţyýłzpíÓšSP IJ[¨î’ŻC§vOdLF ÚzĽ^—˝zćĹ*ÍLsč\ű)ŐĽ±C-ď×)%m–ž*Ýjcô»ŤµYí§i¶zţ§{•`KTăˇ}jŞŰ§=Ż–jfš#˘Úö\ű)m~Ąú˝Ę~¨@Ż~V ö:×ţެڤ7«6)%m–r–ŢóőŞáyýŞŠo,¦¶0j¬}Á^m~Ąćąiůęőš™ć0ÇőÖk/G„»}Đľç®W?UúŠf¦;Ôř\5oěĐé–fíúŰmŘőÎmńŚ$$$hçÎťňűýüŔPöď߯O?ý4ćöxŻŘťĚ’’’äńxTQQˇšš544¨ˇˇÁ\tĚëőĘív«ˇˇA{÷îťĐ÷xwL:ÁÎN5oŰ®ćmŰ•ětjĘőÔ.utčJ ŔÝĆŚ@ôr WŻx~g¶(ź“«{˘öĽZަşýáîŤ~·7ŞfĂ«iSŇfé…msŰüś\ő{ŐzĽ^‡÷˙Ň wű˝ÚőŁőzUô˝DôË˝wîýš™ćĐöç×[Ż˝î•·á!n¬j^c¬­ÇëŁ*tçç䪩nźyśáđţÝ:×~JçBm|ýť ÝćîŐËîďętKłÎµźşmZ4$$$hŢĽyüŔP¦M›sÁîČ ÖŁ¶ˇˇARhá±Ń™™©˛˛2łoCCjjjäńxÔŰŰ+ŹÇŁŇŇŇa-Š6žÂ+™›;Z2ŔřůS`2»čó©ËëU—×K° 3(}ŞtkTďY#í ôF´L° WŤĎ¦ŮfDÁ†‡W?+éF,IŤ‡öé|§_çBË…ĐćçäjšmF¨—n§?ěZF…îýQ׏UÍ»¸pMTë…ţˇ® ‰kßŘ!Iza›'ŞőB‚=Ńď­ŰŚs§¤9bV ‡3X«7ÖőŤ±†W‡łj/qˇ+4®Ĺ…k­ÜŤuŻ€ń÷G_«ţŘqŁâôvkĹ™™©˘˘"ŐÖÖĘëőęľűî3^Ł7##Cö~ÍĘĘ’×ëUvv¶Š‹‹•™™i.v&I‰‰‰–톣¸¸XK—.Ő±cÇäńxÔĐĐ ââb%%%©§§G555fliiiDČF ^YkŐĘŔQĄP8ÚżâőtKłÎwú#zűmŚ6ýCáľ@ŻţĎ'ţ˝ú˝úŻo˙«yN#Ž —®ćí˙yřuĄČĘŢyŮą:ÓÚ¬ó]~Ë9yłj“Žěß­ĺ«×[.¸µâ-Ř­¬¬µ…¶’’’TSS#Ż×ěćçç›mŚëő݆ ˙<33SĹĹĹŞ©©1Çćrąb¶bpą\:zô¨ů:śńy˙ë%%%Éăń¨˘˘B 
˝jŤë[őÚ5Ć9PŢXăÉ8ĂąÝî[Däçç›÷bÜŹËĺšĐ=`˛"ÜLzálëńú¨ęŮ=Ż–ę|§_çÂp×*\5ŞiĄPµořąú˝zëµ—%IŻ~Ö qďť{żRŇşĐĺ×áý»Ł®˙Ök/«/е°ŮŤV ÷G}6”jŢţŰĂS3îëLkłŽěßŐ.˘ńĐ>Ůż[Ól3ôđęgy`‚‰ÇŠÝѨ‚µ:g¬óőóĚĚL••• ézIII1CĎÁúĺfffZM<ÜůŠ5ž›§1V«đ×čqś‘‘aîÓżţá5`lî&=Łw^v®ÎµźŇöç×ňëeÓˇ}jyżNÓl3´îĄ šśk?Ąľ@oÔbfFŕ›ýPjßء ]~-~tŤÎwúUűĆťďôk^vnT€űTé+ÚőŁŐľ±C_?Wö’ůŰ?Đáýż4ĺ~zc‘#DNIsD¶gZŤö WóößŢżőBń÷~ ĆCűt®ý”6»ż«ĺ«×+Áž¨Ö÷ëĚ6O•n´/`ě…‡¸·k+ÜzTEE…ŠŠŠb.śŢvb˘/“á.`Ň3*Ws*PŢŁkôfŐ&íÚč6·ĎËÎŐSĄŻX¶_čçZ÷b•ެzYŤ‡ö™A¨¤- r–ꙫôfŐ&Ţ˙KŢ˙ËëżđÓ˝í¬@ ˙<Ľrת7o¸Ó1Z6$صńőwô«—ę\ű)íyµÔÜ–’ćĐş«&e/ŢľKÝ7ć€`@śX±b…>úč#Ą¤¤ěâ–ÉĎĎW[[›jkkµeË•––š­zzzTUUĄŞŞĐ_–Qą ăŕŽk×®]câËŽ;ÔŢŢ.IržůH¶`I0*Úż=W—ívIŇż{¤D‰i÷MŠűú~Á<őzµa×;šź“«ľ@ŻZŽ×ë|§_9K bö©5zę•«§[šµýůÇ•’ćĐŽ˙*)¶ájÎ’ÂA«\ű˝:ÝÚ¬sí(Á6Cósr‡|ýľ@Żen¨Ęřó}ĂŰďť»0ć"pĆ}ô?×üěÜI»ŔÚ•`ŹZüĚ|˙˙đüŕ0D===fŔkČĚĚŚXTM -H×ĐĐ@^Tî&5«ĹÄě‰Q=fűł J­Şiďť{ĚvVě‰ĘYR¨ś%…Ăľ~‚=Ń2těúCßpď#^]ľŘeľž6m? CRR’TQQ!ŹÇŁŢŢŢ»*++rcŔÍ#ÜLjF ;/;wÎ5po[L|˝ť›ŻÇba&»¤¤$UVVŞ˛˛R^Ż×¬Řuą\TęŔ-@¸ Ôn˛ oú\gĽ÷¶ĹÄÖŰő?Ôuúż›ď w¸9ü»n˝o0€É,ÖbbĂŐčŐůN˙ős-dbăĚWW˙MMÍ÷sçÎĺ?HÄ=*w“ÚĆ×ß•ó$ص·©‹ ŤC_]ý7ťý—CşĽ±ĐËŠ+qŹpLZ˝]˙CM#‚ÝŐ«WkŢĽyL€¸G¸ &•+Á]ľŘ©‹ç>Ô;ĽŰ-Z¤?˙ó?g’L „»Kg˙ĄNwMů&qĺňĹ.}ýĺżYn[˝z5Á.€I…p`şóëŻÍ×}—č/‹ÉaęÉ-[IDATîÜąZ±b­L:„»SĘ…‹ú<)‰‰@ÜKNNÖĽyó´hŃ"B]“á.Ŕ”ŘÓŁĽmúbÚ´Q9ß…”]š™")ÔëtѢEL2ĆÔĚ™3•’’ÂD¸-î"ÜůőײŁr®€ÝnľNIIˇ‚€Qô ¦âá.Ä!Â]C„»‡w ÝĹÄ7߼o3 ŔmĘ]C„»q(77WÓ¦Mc"ÄŤäädą\.&€Qtǵk×®1 _¨Ü€8D¸ qpâá.Ä!Â]C„»‡w ý˙ÖAí™6»8IEND®B`‚ceilometer-6.0.0/doc/source/events.rst0000664000567000056710000002723712701406223021121 0ustar jenkinsjenkins00000000000000.. Copyright 2013 Rackspace Hosting. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _events: ============================= Events and Event Processing ============================= Events vs. Samples ================== In addition to Meters, and related Sample data, Ceilometer can also process Events. While a Sample represents a single numeric datapoint, driving a Meter that represents the changes in that value over time, an Event represents the state of an object in an OpenStack service (such as an Instance in Nova, or an Image in Glance) at a point in time when something of interest has occurred. This can include non-numeric data, such as an instance's flavor, or network address. In general, Events let you know when something has changed about an object in an OpenStack system, such as the resize of an instance, or creation of an image. While Samples can be relatively cheap (small), disposable (losing an individual sample datapoint won't matter much), and fast, Events are larger, more informative, and should be handled more consistently (you do not want to lose one). Event Structure =============== To facilitate downstream processing (billing and/or aggregation), a :doc:`minimum required data set and format ` has been defined for services, however events generally contain the following information: event_type A dotted string defining what event occurred, such as "compute.instance.resize.start" message_id A UUID for this event. generated A timestamp of when the event occurred on the source system. traits A flat mapping of key-value pairs. The event's Traits contain most of the details of the event. 
Events from Notifications
=========================

Events are primarily created via the notifications system in OpenStack.
OpenStack systems, such as Nova, Glance, Neutron, etc., will emit
notifications in a JSON format to the message queue when some notable action
is taken by that system. Ceilometer will consume such notifications from the
message queue, and process them.

The general philosophy of notifications in OpenStack is to emit any and all
data someone might need, and let the consumer filter out what they are not
interested in. In order to make processing simpler and more efficient, the
notifications are stored and processed within Ceilometer as Events. The
notification payload, which can be an arbitrarily complex JSON data
structure, is converted to a flat set of key-value pairs known as Traits.
This conversion is specified by a config file, so that only the specific
fields within the notification that are actually needed for processing the
event will have to be stored as Traits.

Note that the Event format is meant for efficient processing and querying;
there are other means available for archiving notifications (e.g. for audit
purposes), possibly to different datastores.

Converting Notifications to Events
----------------------------------

In order to make it easier to allow users to extract what they need, the
conversion from Notifications to Events is driven by a configuration file
(specified by the flag definitions_cfg_file_ in ceilometer.conf). This
includes descriptions of how to map fields in the notification body to
Traits, and optional plugins for doing any programmatic translations
(splitting a string, forcing case, etc.)

The mapping of notifications to events is defined per event_type, which can
be wildcarded. Traits are added to events if the corresponding fields in the
notification exist and are non-null. (As a special case, an empty string is
considered null for non-text traits. This is due to some OpenStack projects
(mostly Nova) using an empty string for null dates.)

If the definitions file is not present, a warning will be logged, but an
empty set of definitions will be assumed. By default, any notifications that
do not have a corresponding event definition in the definitions file will be
converted to events with a set of minimal, default traits. This can be
changed by setting the flag drop_unmatched_notifications_ in the
ceilometer.conf file. If this is set to True, then any notifications that
don't have events defined for them in the file will be dropped. This may be
what you want: the notification system is quite chatty by design (the
notifications philosophy is "tell us everything, we'll ignore what we don't
need"), so you may want to ignore the noisier ones if you don't use them.
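As a minimal sketch, the two flags could be set in ``ceilometer.conf`` along
the following lines. The ``[event]`` section name and the
``event_definitions.yaml`` file name are assumptions for illustration, not
taken from this guide::

    [event]
    # YAML file describing how notification fields map to event traits
    # (assumed default file name).
    definitions_cfg_file = event_definitions.yaml
    # If True, notifications with no matching event definition are dropped
    # instead of being converted to events with minimal default traits.
    drop_unmatched_notifications = False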
.. _definitions_cfg_file: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html
.. _drop_unmatched_notifications: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html

There is a set of default traits (all are TEXT type) that will be added to
all events if the notification has the relevant data:

* service: (All notifications should have this) notification's publisher
* tenant_id
* request_id
* project_id
* user_id

These do not have to be specified in the event definition, they are
automatically added, but their definitions can be overridden for a given
event_type.

Definitions file format
-----------------------

The event definitions file is in YAML format. It consists of a list of event
definitions, which are mappings. Order is significant: the list of
definitions is scanned in *reverse* order (last definition in the file to the
first), to find a definition which matches the notification's event_type.
That definition will be used to generate the Event. The reverse ordering is
done because it is common to want to have a more general wildcarded
definition (such as "compute.instance.*") with a set of traits common to all
of those events, with a few more specific event definitions (like
"compute.instance.exists") afterward that have all of the above traits, plus
a few more. This lets you put the general definition first, followed by the
specific ones, and use YAML mapping include syntax to avoid copying all of
the trait definitions.

Event Definitions
-----------------

Each event definition is a mapping with two keys (both required):

event_type
    This is a list (or a string, which will be taken as a 1 element list) of
    event_types this definition will handle. These can be wildcarded with
    unix shell glob syntax. An exclusion listing (starting with a '!') will
    exclude any types listed from matching. If ONLY exclusions are listed,
    the definition will match anything not matching the exclusions.

traits
    This is a mapping; the keys are the trait names, and the values are
    trait definitions.

Trait Definitions
-----------------

Each trait definition is a mapping with the following keys:

type
    (optional) The data type for this trait (as a string). Valid options
    are: *text*, *int*, *float*, and *datetime*. Defaults to *text* if not
    specified.

fields
    A path specification for the field(s) in the notification you wish to
    extract for this trait. Specifications can be written to match multiple
    possible fields; the value for the trait will be derived from the
    matching fields that exist and have non-null values in the notification.
    By default the value will be the first such field. (Plugins can alter
    that, if they wish.) This is normally a string, but, for convenience, it
    can be specified as a list of specifications, which will match the
    fields for all of them. (See `Field Path Specifications`_ for more info
    on this syntax.)

plugin
    (optional) This is a mapping (for convenience, this value can also be
    specified as a string, which is interpreted as the name of a plugin to
    be loaded with no parameters) with the following keys:

    name
        (string) name of a plugin to load

    parameters
        (optional) Mapping of keyword arguments to pass to the plugin on
        initialization. (See documentation on each plugin to see what
        arguments it accepts.)

Field Path Specifications
-------------------------

The path specifications define which fields in the JSON notification body
are extracted to provide the value for a given trait. The paths can be
specified with a dot syntax (e.g. "payload.host"). Square bracket syntax
(e.g. "payload[host]") is also supported. In either case, if the key for the
field you are looking for contains special characters, like '.', it will
need to be quoted (with double or single quotes) like so::

    payload.image_meta.'org.openstack__1__architecture'

The syntax used for the field specification is a variant of JSONPath, and is
fairly flexible. (See https://github.com/kennknowles/python-jsonpath-rw for
more info.)
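For a feel of how such a path resolves against a notification body, here is
a minimal Python sketch using the jsonpath-rw library linked above; the
notification content is invented for illustration::

    from jsonpath_rw import parse

    notification = {
        "publisher_id": "compute.host-0001",
        "payload": {"host": "host-0001",
                    "instance_id": "6accc078-81de-4567-894f-53af5653ac63"},
    }

    # "payload.host" is the dot-syntax field path from a trait definition.
    expr = parse("payload.host")
    values = [match.value for match in expr.find(notification)]
    print(values)  # ['host-0001'] - the first match becomes the trait value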
Example Definitions file
------------------------

::

    ---
    - event_type: compute.instance.*
      traits: &instance_traits
        user_id:
          fields: payload.user_id
        instance_id:
          fields: payload.instance_id
        host:
          fields: publisher_id
          plugin:
            name: split
            parameters:
              segment: 1
              max_split: 1
        service_name:
          fields: publisher_id
          plugin: split
        instance_type_id:
          type: int
          fields: payload.instance_type_id
        os_architecture:
          fields: payload.image_meta.'org.openstack__1__architecture'
        launched_at:
          type: datetime
          fields: payload.launched_at
        deleted_at:
          type: datetime
          fields: payload.deleted_at
    - event_type:
        - compute.instance.exists
        - compute.instance.update
      traits:
        <<: *instance_traits
        audit_period_beginning:
          type: datetime
          fields: payload.audit_period_beginning
        audit_period_ending:
          type: datetime
          fields: payload.audit_period_ending

Trait plugins
-------------

Trait plugins can be used to do simple programmatic conversions on the value
in a notification field, like splitting a string, lowercasing a value,
converting a screwball date into ISO format, or the like. They are
initialized with the parameters from the trait definition, if any, which can
customize their behavior for a given trait. They are called with a list of
all matching fields from the notification, so they can derive a value from
multiple fields. The plugin will be called even if there are no fields found
matching the field path(s); this lets a plugin set a default value, if
needed. A plugin can also reject a value by returning *None*, which will
cause the trait not to be added. If the plugin returns anything other than
*None*, the trait's value will be set to whatever the plugin returned
(coerced to the appropriate type for the trait).
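The contract just described can be sketched as a plain Python class. The
class name, the ``trait_value`` method name, and the way a plugin would be
registered (e.g. via a setuptools entry point) are assumptions for
illustration; this is not one of the plugins shipped with Ceilometer::

    class LowercaseTraitPlugin(object):
        """Illustrative plugin: lowercase the first matched text field."""

        def __init__(self, default=None, **parameters):
            # 'parameters' from the trait definition arrive as keyword
            # arguments when the plugin is initialized.
            self.default = default

        def trait_value(self, match_list):
            # match_list holds a (field_path, value) pair for every matched
            # field; it may be empty, which lets us supply a default.
            if not match_list:
                return self.default
            path, value = match_list[0]
            if not isinstance(value, str):
                return None  # reject the value: the trait is not added
            return value.lower()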
Building Notifications
======================

In general, the payload format OpenStack services emit could be described as
the Wild West. The payloads are often arbitrary data dumps at the time of
the event, and are often susceptible to change. To make consumption easier,
the Ceilometer team offers two proposals: CADF_, an open, cloud standard
which helps model cloud events, and the PaaS Event Format.

.. toctree::
   :maxdepth: 1

   format

.. _CADF: http://docs.openstack.org/developer/pycadf/
ceilometer-6.0.0/doc/source/index.rst0000664000567000056710000000340412701406223020712 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 Nicolas Barcet for Canonical

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

==================================================
Welcome to the Ceilometer developer documentation!
==================================================

The :term:`Ceilometer` project is a data collection service that provides
the ability to normalise and transform data across all current OpenStack
core components with work underway to support future OpenStack components.

Ceilometer is a component of the Telemetry project. Its data can be used to
provide customer billing, resource tracking, and alarming capabilities
across all OpenStack core components.

This documentation offers information on how Ceilometer works and how to
contribute to the project.

Overview
========

.. toctree::
   :maxdepth: 2

   overview
   architecture
   measurements
   events
   webapi/index

Developer Documentation
=======================

.. toctree::
   :maxdepth: 2

   install/index
   configuration
   plugins
   new_meters
   testing
   contributing
   gmr

Appendix
========

.. toctree::
   :maxdepth: 1

   releasenotes/index
   glossary
   api/index

.. update index

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
ceilometer-6.0.0/doc/source/format.rst0000664000567000056710000003021712701406223021075 0ustar jenkinsjenkins00000000000000..
      This work is licensed under a Creative Commons Attribution 3.0 Unported
      License.

      http://creativecommons.org/licenses/by/3.0/legalcode

.. _paas_event_format:

=================
PaaS Event Format
=================

There are a number of PaaS services that are currently under development and
a growing number of applications running on top of OpenStack infrastructure.
In an effort to avoid significant integration work that would be required if
each service produced a unique notification payload, we have defined a
minimum data set that provides the core data elements needed for downstream
metering processes. This format is not enforced by Ceilometer but serves as
an advisory guideline for PaaS service developers:

::

    [
        {
            "Field": "event_type",
            "Type": "enumeration",
            "Description": "for event type records, this describes the actual event that occurred",
            "Compliance": "required for events",
            "Notes": "depends on service, defaults to create, exists, delete"
        },
        {
            "Field": "timestamp",
            "Type": "UTC DateTime",
            "Description": "timestamp of when this event was generated at the resource",
            "Compliance": "required",
            "Notes": "ISO 8601 date YYYY-mm-ddThh:mm:ss"
        },
        {
            "Field": "message_id",
            "Type": "String",
            "Description": "unique identifier for event",
            "Compliance": "required",
            "Notes": ""
        },
        {
            "payload": [
                {
                    "Field": "version",
                    "Type": "String",
                    "Description": "Version of event format",
                    "Compliance": "required",
                    "Notes": ""
                },
                {
                    "Field": "audit_period_beginning",
                    "Type": "UTC DateTime",
                    "Description": "Represents start time for metrics reported",
                    "Compliance": "required",
                    "Notes": "Format ISO 8601 date YYYY-mm-ddThh:mm:ss"
                },
                {
                    "Field": "audit_period_ending",
                    "Type": "UTC DateTime",
                    "Description": "Represents end time for metrics reported",
                    "Compliance": "required",
                    "Notes": "Format ISO 8601 date YYYY-mm-ddThh:mm:ss"
                },
                {
                    "Field": "record_type",
                    "Type": "enumeration",
                    "Values": {
                        "event": "events describe some kind of state change in the service",
                        "quantity": "quantity describes a usage metric value"
                    },
                    "Compliance": "optional",
                    "Notes": ""
                },
                {
                    "Field": "project_id",
                    "Type": "UUID",
                    "Description": "Keystone project_id identifies the owner of the service instance",
                    "Compliance": "required",
                    "Notes": ""
                },
                {
                    "Field": "user_id",
                    "Type": "UUID",
                    "Description": "Keystone user_id identifies specific user",
                    "Compliance": "optional",
                    "Notes": ""
                },
                {
                    "Field": "service_id",
                    "Type": "UUID",
                    "Description": "Keystone service_id uniquely identifies a service",
                    "Compliance": "required",
                    "Notes": ""
                },
                {
                    "Field": "service_type",
                    "Type": "String",
                    "Description": "Keystone service_type uniquely identifies a service",
                    "Compliance": "required",
                    "Notes": ""
                },
                {
                    "Field": "instance_id",
                    "Type": "UUID",
                    "Description": "uniquely identifies an instance of the service",
                    "Compliance": "required",
                    "Notes": "assuming instance level reporting"
                },
                {
                    "Field": "display_name",
                    "Type": "String",
                    "Description": "text description of service",
                    "Compliance": "optional",
                    "Notes": "used if customer names instances"
                },
                {
                    "Field": "instance_type_id",
                    "Type": "enumeration",
                    "Description": "used to describe variations of a service",
                    "Compliance": "required",
                    "Notes": "needed if variations of service have different prices or need to be broken out separately"
                },
                {
                    "Field": "instance_type",
                    "Type": "String",
                    "Description": "text description of service variations",
                    "Compliance": "optional",
                    "Notes": ""
                },
                {
                    "Field": "availability_zone",
                    "Type": "String",
                    "Description": "where the service is deployed",
                    "Compliance": "optional",
                    "Notes": "required if service is deployed at an AZ level"
                },
                {
                    "Field": "region",
                    "Type": "String",
                    "Description": "data center that the service is deployed in",
                    "Compliance": "optional",
                    "Notes": "required if service is billed at a regional level"
                },
                {
                    "Field": "state",
                    "Type": "enumeration",
                    "Description": "status of the service at the time of record generation",
                    "Compliance": "optional",
                    "Notes": "required for existence events"
                },
                {
                    "Field": "state_description",
                    "Type": "String",
                    "Description": "text description of state of service",
                    "Compliance": "",
                    "Notes": ""
                },
                {
                    "Field": "license_code",
                    "Type": "enumeration",
                    "Description": "value that describes a specific license model",
                    "Compliance": "optional",
                    "Notes": "this field is TBD depending on dev_pay design work"
                },
                {
                    "metrics": [
                        {
                            "Field": "metric_name",
                            "Type": "String",
                            "Description": "unique name for the metric that is represented in this record",
                            "Compliance": "required",
                            "Notes": ""
                        },
                        {
                            "Field": "metric_type",
                            "Type": "enumeration",
                            "Description": "gauge, cumulative, delta",
                            "Compliance": "required",
                            "Notes": "describes the behavior of the metric, from Ceilometer"
                        },
                        {
                            "Field": "metric_value",
                            "Type": "Float",
                            "Description": "value of metric for quantity type records",
                            "Compliance": "required for quantities",
                            "Notes": ""
                        },
                        {
                            "Field": "metric_units",
                            "Type": "enumeration",
                            "Description": "describes the units for the quantity",
                            "Compliance": "optional",
                            "Notes": ""
                        }
                    ]
                }
            ]
        }
    ]

.. note::

   **Required** means that it must be present and described as in the
   specification. **Optional** indicates it can be present or not, but if
   present it must be described as in the specifications. **Audit period
   timestamps** are not currently enforced against the audit period.
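As a convenience for service authors, the compliance rules in the table
above can be folded into a small check. This is a sketch only (not part of
Ceilometer), the helper name is invented, and it uses the field names
exactly as they appear in the specification table::

    REQUIRED_TOP_LEVEL = ("event_type", "timestamp", "message_id")
    REQUIRED_PAYLOAD = ("version", "audit_period_beginning",
                        "audit_period_ending", "project_id", "service_id",
                        "service_type", "instance_id", "instance_type_id")

    def missing_required_fields(event):
        """Return the names of required fields absent from a PaaS event."""
        missing = [f for f in REQUIRED_TOP_LEVEL if f not in event]
        payload = event.get("payload", {})
        missing += [f for f in REQUIRED_PAYLOAD if f not in payload]
        return missing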
Sample Events
=============

The event format listed above is used to deliver two basic types of events:
*quantity* and *state* events.

Sample state events
-------------------

These events describe the state of the metered service. They are very
similar to the existing state events generated by Infrastructure. Generally
there would be at least three types of events: create, exists and delete.

Examples of these events for a DNS service are listed below.

``dns.zone.create`` event is sent after a zone has been created::

    {
        "event_type": "dns.zone.create",
        "time_stamp": "2013-04-07 22:56:30.026191",
        "message_id": 52232791371,
        "payload": {
            "instance_type": "type1",
            "availability_zone": "az1",
            "instance_id": "6accc078-81de-4567-894f-53af5653ac63",
            "audit_period_beginning": "2013-04-07 21:56:32.249876",
            "state": "active",
            "audit_period_ending": "2013-04-07 22:56:32.249712",
            "service_id": "1abbb078-81cd-4758-974e-35fa5653ac63",
            "version": "1.0",
            "tenant_id": "12345",
            "instance_type_id": 1,
            "display_name": "example100.com",
            "message_id": 52232791371,
            "user_id": "6789",
            "state_description": "happy DNS"
        }
    }

``dns.zone.exists`` event is sent every hour for existing zones::

    {
        "event_type": "dns.zone.exists",
        "time_stamp": "2013-04-07 22:56:37.782573",
        "message_id": 52232791372,
        "payload": {
            "instance_type": "type1",
            "availability_zone": "az1",
            "instance_id": "6accc078-81de-4567-894f-53af5653ac63",
            "audit_period_beginning": "2013-04-07 21:56:37.783215",
            "state": "active",
            "audit_period_ending": "2013-04-07 22:56:37.783153",
            "service_id": "1abbb078-81cd-4758-974e-35fa5653ac63",
            "version": "1.0",
            "tenant_id": "12345",
            "instance_type_id": 1,
            "display_name": "example100.com",
            "message_id": 52232791371,
            "user_id": "6789",
            "state_description": "happy DNS"
        }
    }

The ``dns.zone.delete`` event is sent when a zone is deleted::

    {
        "event_type": "dns.zone.delete",
        "time_stamp": "2013-04-07 22:56:37.787774",
        "message_id": 52232791373,
        "payload": {
            "instance_type": "type1",
            "availability_zone": "az1",
            "instance_id": "6accc078-81de-4567-894f-53af5653ac63",
            "audit_period_beginning": "2013-04-07 21:56:37.788177",
            "state": "active",
            "audit_period_ending": "2013-04-07 22:56:37.788144",
            "service_id": "1abbb078-81cd-4758-974e-35fa5653ac63",
            "version": "1.0",
            "tenant_id": "12345",
            "instance_type_id": 1,
            "display_name": "example100.com",
            "message_id": 52232791371,
            "user_id": "6789",
            "state_description": "happy DNS"
        }
    }

Sample quantity events
----------------------

Quantity events have the same overall format, but additionally have a
section called metrics, which is an array of information about the meters
that the event is reporting on. Each metric entry has a type, unit, name and
volume. Multiple values can be reported in one event.
``dns.zone.usage`` is an hourly event sent with usage for each zone
instance::

    {
        "event_type": "dns.zone.usage",
        "time_stamp": "2013-04-08 10:05:31.618074",
        "message_id": 52232791371,
        "payload": {
            "metrics": [
                {
                    "metric_type": "delta",
                    "metric_value": 42,
                    "metric_units": "hits",
                    "metric_name": "queries"
                }
            ],
            "instance_type": "type1",
            "availability_zone": "az1",
            "instance_id": "6accc078-81de-4567-894f-53af5653ac63",
            "audit_period_beginning": "2013-04-08 09:05:31.618204",
            "state": "active",
            "audit_period_ending": "2013-04-08 10:05:31.618191",
            "service_id": "1abbb078-81cd-4758-974e-35fa5653ac63",
            "version": "1.0",
            "tenant_id": "12345",
            "instance_type_id": 1,
            "display_name": "example100.com",
            "message_id": 52232791371,
            "user_id": "6789",
            "state_description": "happy DNS"
        }
    }
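A downstream billing process consuming such quantity events would typically
fold the ``metrics`` array into per-metric totals. The following is a
consumer-side sketch only, not Ceilometer code::

    from collections import defaultdict

    def total_usage(events):
        """Sum metric_value per (metric_name, metric_units) across events."""
        totals = defaultdict(float)
        for event in events:
            for metric in event["payload"].get("metrics", []):
                key = (metric["metric_name"], metric.get("metric_units", ""))
                totals[key] += metric["metric_value"]
        return dict(totals)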
—˘oǶěßřć×_——-ĽmâĎťCüąsüőW87k†Ěěl™áĽî=zďź~‚šŞ*~řę+< Bâ… 8µe šÚŰăú˝{řßożI¶że6@ `î¸q?xI.ŕöˇCXćé |ż};®Üľ-µźŠž÷J??äĺĺaŢgźáî‘#H AěŮłŘ˙ðł´Äó¤$ř}Ь,ń1ĄedŔÇĎ?yy!ńÂÜ;v }‹ľUúßożaÝΝضj’J¬űóřq镸_äąghPMSÓÔ2łŞTŽeçÂ÷Ă€™ëźÖćN­+\¶é= —]v`aw q+YJWÚóç˙˙ú˘0˝ä:uí=ÇŞ–÷´ÄqËŁQżţ(ČĎÇ“Ŕ ő„üÜ<4ę×ď­eŚ={FęĎ  Ţłm¦OP[‘ÇOŕńéţrQÍŽQÔ“w–‘âˇÄÜqă‚óááŘřçź9f ~Ůł!7o˘{»vřběXąĘܲ?˛sr°ĚÓ 'N”,··¶Ć‰‘_P€•~~řÝßkć6UQ)Ě…’_ľÄ4LřDş«@ ĽxýýşvĹşŻľ’¬kîŕ€ľú ÝĆŤĂíɲžŽ6ŻX¦öö’m;99aă’%č4z4®ŢąSéúÝú×_ČËËĂÂÉ“1eř˙ŹJߥMüîë‹^“&!äćMÉňm˙ŤĽĽ<Ś<+gĎ–,o`c…'">9›÷ďÇ®#GĐľčgeÎűIŃ r &L€ľ®náJˇîÝşÁÔČCfĎF|rr•î-ń1˝JMĹ”áĂ1©¨»‰¶…|çĚÁé‹qéĆ xO™‚Eěl‹­{đäI•ďy4ގńľĄ§AĂŔęş:Č~ý¦ŇĺčXX†Ř_ţ˙íVV˙<`nxýLvë‘×E­JtĚĚĘ,##1Qęßů"Ń˙ŻK*c]Ń˙5uŽU-ďu‰V¨oŁgmóÖ­đ<ü2Jü?š•’‚¸«W`Ó©ômmń&:Zţ‚ ťšŠ”‡8zQ˙2'"˘÷<̨ l]±ťFŹĆŠŤaoc??`늒‡Ç·ů·( =`€ĚőýúaĄź.\»&sý·|ł1}äČRËš5lXřá$- pbÓ¦2_/Ţ6­śQçßćBŃ8źôęUj]«¦M‘xá‚Ô˛KE‚}6x°ĚňFą»cóţý˝u«JçÝÄŢw>ĬիńÍÜą°45•lŰ®eKDľ:x¸K÷3n`m-ůyXź>2ץ–¨÷ŞŢ/ňÜ3µ!((ʆhPÝž 6r@|9ďQo#ÔŇ’k;őJŚÉ ¦© evĄ/o's›¬ěJ­«És¬jyň ŘZ\Ł~ý–.m%3«ČÜ®?\߲µĚőňÎ>CDDôކ•ťšŐĆÂ?y{cÜ×_cäüů€ßV®„U9ßŘ”hZĆéؓ2ľiô–ÎěJOY¦Uô!¬řŕ’ÉŻ^aËţýř÷ęUÄ$$HĆ©ČÍ«ú(áϊαŤŤ|uRÔě¶x+‘âš-Ź‰ŹŻŇyoZ¶ <=ńW@ü‚ŕҢş·k·víŕęě 5µę˝em‹ľÓPW—ş—d­+9hUďyî™ÚpýúuľC*9}cT®¤˙î¨IcŘuëZĄ0C”™ u]]ě89EátuefA]WjZZÉč%ÔÖ’CMŞîs¬É:+E (5 iYöéżţ†‚˘îşDDD 3Ţ!÷®]ajd„¤”ˇOçÎ Räü@”ZFË#r_§+Ç·6OccŃ{ňdăËhRÚĺ|“WŢşęĽ_äągj“Žž!·ěŔwK%clf ˝zĆrmË@ăýő$8MFŁ~ýpďŔ¤ĆĆ•»˝YË–h?wţŰ·_j¬‡ÔŘX7m };[$˙W˝Óž§'ÄC]·ęŮŰ#IĆŔ×őí ·+'LŻŐ}Ž5Yg%Yµk33Ä„„⌗W™Űą­ZŰ.®°jß±ˇ—ů BDD 3޵7")%†úúHJIÁŠŤńÍĽyrż^WK ŻÓŇsć ęééUx˙‚ýl+cÉúőxž”;KK,ź1[·†‘Ô…B¨©ŞBż}űŞ= ji!5=)Ż_ĂÔČčíŰkjâMz:Ň33%cYČz ×•łŮlyĚML°röl¬ś=‘QQ p0.\»†™«V!''SЦz•GN±ľŃ5ˇŞ÷KuÝ35v~úFhëęŞŰhĽź’nßAüµk°hÓn+W!hádľH‘ą­QăĆčş|´ML`Ü´©T~ ĆM›˘ůś[ľĽôĂtűöh7k&žťý7Š .-Ź„›·`ب¸Ë 3Šf1I¸u»F몪ç(PUEA±–•5YgeŐ‘x°Ô˛< €mW4ęןa)<•şvB!7n`ĂźÂŇÔçwí‚™±16îÝ+óA ‹šü?|ö¬ÖÎC<˙Ď?cDż~°ł´„®¶6Ô…BÄTqjR¨_4 Ř3l”Ĺ®hű{ŹË\żhą]%w+÷ť¦zxŕÔ–-Xďí řeĎž2Ă€’ÓÉ(5‹HuS„ű…¨ş Îrňţąôí·Č|ńő6Ŕ mŰĐbÔHčŰŮAUCúú0iÖ ífÍDßź×CŰÄ/""p}ëé÷Ů#G›•…únÝŃeń"čŰŘ@EM ZĆFhňńGč¶l)ômm!¬D8qř0ňsóŕŕîçĎ'CĎÚ ŞĐ·±Ó¤IppwG~n"Ž©ŃzŞě9ćµX¬ďÖ*jjĐ44¬ń:+NC_6ť;C”žŽč‹ËÝ6ćR˛SSaëÚęúúüĺ ""†ďJFV¦űř ???~ý5ě­­±vÁ©éYeÉÍÍ•úwŹ˘Vëwí’ą}ŕĄKp2>7ÖŘądç䬊 €)¶fëVÉĂ{e[¸:N ·çXéąŕoGDŔÄŐ='L(µýöC‡d–·«čCdg'§*ť÷ÄĹ‹áĐŻźĚ™ZÄrĆÉčzŁWÔ…ĺćĄÖ­ŰąłFď;E¸_hPeĄ'$âÔĚYxńŕ4 ĐfęT|ĽsFź>…GŁżßF8 5MMD_¸€€Ů_”43->—ľůůąąhĐ«>ţcĆbŘ_ˇĂÜąęčŕEDnn«x ×Ďž!ÜŻđýłĺ1Ľ{7Fź>…Ź˙؅ƶ( ߸Żk8¸®ě9ľ¸_ř˙R×%K0&(Ăý˙®ń:+®Aď^PUWÇł˙E^vůťć‹D:{*B!öúżDDÄ0ă]YúóĎŚŠÂĐ>}0 {wŔ޽ѿkW<ŠŽĆŇź–Ú^<&‚p0rD"$ľx4t(´55áډ‹#2* 9"â““±őŕAŚóňBdT”dŽšĐ¸~}Ŕ˛ ü겲łvçF/\Ô´4É̇˙ůG|T(42***Řuô(ľŰ¶ I/_"#+ çĂĂ1aŃ"degŁC«V’í'5UUě:rË7lŔădfe!2* >7bÇáĂPSUĹä˘iN+«  ńÉɰhN]¸€W©©ČÍÍĹł¸8ÉőkáŕPęuâe^ëÖáÎÇČÎÉAlBć~ű-®ÜşĂü†Iî"TĄőçĎqbę4ś[ľĎΞEZ|ŹNťBzBňE"äffâĹ¸ć· §fĚDNZĺîŁűýŤŔąó‚ěׯ‘ꛇ¬—/}áN1÷˙ö'őT™s ýá{$Ţş…ܬ,䤥KŤŹQ“u&Ö¨h¶¬'Arm˙čta÷ˇFýűóš  äÔ E¦Nť čÓ¦ úą¸ĽłŇ­ÄľŇÂÂp><îÓ¦ÁŘŔW€iQ3N p† —#ž™‰›6ˇk۶€ţS§â|ŃĄĹË€ż1iÉJ´ÚsrtÄńM›`Plüń±—5KEÖď;y“–,)µŤŤą9‚·mĂŇ_~Áľ“'K÷ŰöQÜŹ;wbńúő2×96h€€_•Ňoď^,\»VöŤ$ŕ» 0ÍĂŁJçý,.năÇ#)EvźmM řŻ_/ą†bOźĆřE‹JmŻ.âŻÄ4Ä&$ őęU‚rŹ©2ë*sżTôzU§Saa(šą`óćÍ2·ůî»ď sëF8ę ľ[ľ‡ţ=ń"˙»°¶¶.w ńý˘ýć š<ŚdĺŐ!ŤˇŻ,\¸BD ˇN´ĚHĎĚÄtŕŰůóĄ‚  pŠÍĺ3f   Ó}|^4XĺŹ^^puv†¶¦&ôutŕҢ…ä5Cz÷ĆĹ?ţŔakauˇ:ZZpnÖ ľ_|ŕmŰJ=V'Źţý±váB8ŘŮA¨¦ssŚýč#ýö¬ÍÍá=e śˇ.VzZĎ9ăĆÁýz|ر# őő!TSCkkĚ7˙üţ{©6¦Ź‰ă~~čץ LęŐšŞ*LŤŚ0ČÍ '7o–dTT}++\Ú˝ó>ű Ž @OGęB!ě,-1fŕ@śßąłTĂúöĹ–+đA“&ĐÔĐ@===tsqÁ‘ ĐŁCčµÂÉ|KŰĘŞíű…¨&°…)*…k™AD5Ź-3¨"äiˇÁ–DDu[f‘"RaQyŘB Ă ""z+DDDD¤Hf‘\h‘˘`ADDr+hlßľť•BDDDDďĂ ""Ş´ŚŚ V˝s 3Hn%g6ńôôdĄŃ;Ç0ä"Ď­DDDDDďĂ ""z+DDDD¤HfQąd‘˘aADDebADDDDŠaIŮ´ot]\XDÄ Ă ’rýŢ=V1Č """"…Ć0¤0Ě "DDDD¤čfԲ˷naś— €Q§N°vsC‡‘#á»y3’^ľ$żzăÎťˇëâ‚›÷ď—YÖ§OˇëâăÎťńâŐ+ąĘű~űv躸ŕżGŹş..ĐuqÁÁÓ§%Ű\ą}cżú Ťúö…aÇŽhЧ<ćÍĂůđp™Çó&- ş..p:°÷Ä ´÷đ€©«+Ú Š]GŽH¶ýÝß.#FŔ¸sg44ßţö x1Č """"*EŤUP{ă3ooäĺĺI–ĺDx‰»‘‘Ř~čÎnßkss|Üł'öź:…GŽŕGG™ĺí9v đIŻ^8.wŮňřăčQĚXµJŞĽ¤”?w'ÎźÇÚ 0ŐĂCę5š€Ě¬,ü€ÉK—JÖE<{†é>>°45ĹÝČH,úé'Éş¨çϱŇφúú2|8o"DDDDD 
3ĹJ??äĺĺaŢgźaŇС°45Efv6.^»†ß}‡¨çĎáăç‡ÍË—câ!Řęöź<‰Ő_|! Äňóó±÷äIŔ¤!C0ł(x§l?~<ćŹ/ü3-,LRöă|±f `Á„ űŃG°63C|r2ś>ŤŐ[¶ŕ«~@ŹĐÄŢ^ň:ˇZáí•–‘??üäĺ…QŕĹ«Włf N_Ľ˙ýö"ž=öU«0ČÍ ÉĹÖýyü8Ă “ť‰¸¨‡¬%٧o˝zĆ 2aUŻ'±±’€@_W .½[7aČěŮONtiÓŽ ŕţ“'8rć Fôë'UÖ™+W›€čääTˇ˛ßfËţýČÎÉÁ2OO,ś8Q˛ÜŢÚ 'ND~AVúůáw¬™;W˛^ ^Ą¦bĘđáTÔÝDŰÂľsćŕôĹ‹¸t㼧L‘śŹm±už<áM˘^&ÇáäţźYJhÄäer 2HŮpĚŚZ$nĹ0kőj†ôę…=ÇŹcב#Xęé ř;(™ŮŮ8d¤KIEË.OZf¦\ەגB[SłRë¨vŮŮٱę(DDDD¤ĚfÔ2s¬ś=+gĎFdT‚CBŕŚ ×®aćŞUČÉÉÁ”#$ŰO2{ŽÇîăDZdút) µď IDATě>z@áŔźU)»,şZZxť–†3gPOOŹŤHÉ1Č """"eÇ13ť¦zxŕÔ–-Xďí řeĎ©m:¶nŤMHŔ…k×p˙Ʉܼ —-ĐşŚ)[ĺ-», ‹ĆNxřě/‘’cADDDDuĂŚZ4qńb8ô뇫wî”Z'3®ÄŕťŔ˙·Ŕ8xú4v :yذj)[,77WňsŹöíëwí’ąmŕĄKp2>7ň˘)0DDDDTW0̨EONĆ„E‹pęÂĽJMEnn.žĹĹaéĎ…Sa¶pp(ő:wwhkjâPp0ţó7^&Ç`ADDDDu [f(ˇÜÜ\¬ÚĽđůđá¬"’‰AŐUl™ˇDrD"†DDDD¤°Ţu!Ć@H±1Ě """"…T[A† ""ĹĹ0*E @ Č˝^üďšřSVůŞŞŞ077ÇđáĂqýúő ź[É?Bˇ666řôÓOqďŢ=ą^#ëXKşző*FŹŤ¦M›BOO¨_ż>>ýôSÜąs§RűyŰő!"bÁ@aŃ[”úSŢşŠl[Ö~^ż~Ťť;wâĘ•+čر#.\¸PĄcNLLÄúőëńĎ?˙ŔĹĹWŻ^•ë<Ë:V8vě:w/bÝşuHLLDll,ľůćś˙üsĽzőŞF÷Ą«« ???ŔáÇYůDT§Ô• CŚch˝{ 3ädii‰ďľű±±±={v•ËűňË/áęꊨ¨¨r·{óć +źęŚşd1Đ "z·ŘÍ„¨&Mš„Ý»wc×®]:t(>ţřăJ—•ššŠK—.aÆ řöŰoK­ß»w/€ę›……¨¶2 MçţČÉÎB\ÔCą^ݧo˝zĆďôĺ=6hܲ^$Ćŕer»śŐ0†DD °uëV´jŐ S§NE—.]`l\ąÖľľľ¸|ů2Ö®]‹üü|Ěś9VVVHLLÄÁ±hŃ"ššbÝşu¬x"RzQQQRA\>ë_ˇ2„ęš7űďě©đ1‹íŰ·cĆŚĽŞ»™U–/_Ž„„xzzJ­ĺţ)ÎČČ˙ţű/V®\‰sçΡU«VĐÔÔDłfͰ}űvĚž=wîÜAłfÍXéD¤ô´µµˇ©©YĄ2D9Yďôłł3«ĺĽ‰¨ú±eUĘŰf ‘gV‘ŠĚ… 3H)%''ăĹ‹’Büo@±Ă‰ŠĘĘĘ’:yέqăĆ[[[hkkĂĆĆFňw]<f‘ÂČČČ@tt4222[ĄrMŐŐ%?««ŞÂP(”ZoVĆtŇę**0ŇШ–sKÉÎFN~~©ĺ˘ü|ĽĚÉ‘Z–ő˙ÓS‹ đJ$*·lqŕQVđamm ŘÚÚJZw(SĐÁ0Â-imŤ¬bńoŁ­Ş UU©pÂP]B•Âą/,´´ę|Ë Elĺ ˛óň$ÁGzn.ŇssüřńJ$‚¨  Ôëbcc‹›7oJ-wiiÚ´©$ŕPÄi¦ëd‘sss››#>>žď XgĽFDDDDDď·¨¨(DDD ::HII‘ëu¦ęę’°BGM :jj0TW‡†Şę{YŹŞŞr…4âV âŔăĄH„ôÜÜR-<Ä]ZJ¶čhܸ1lmmakk‹&MšHĆě¨-5fÄĆĆÂĆĆFć:###tďŢË–-CëÖ­«ĽŻ””ś;wÜąsŕääÄw9•Ug%붦÷Gdgg--­R2‘r{đŕ"""đŕÁ·Žů PO(„ˇş:t…BÔ ®U…˛)ŻHšH„´˘`ăeNŇD"$•čćR2ŕ022’Mš4yç­7j,̸|ů2`Ŕ€8vědąH$Âőë×1iŇ$¸¸¸ŕرcčŰ·oĄ÷“——‡îÝ»ŁWŻ^’îž={˘@F3*›¬:“U·5ą?˘7n ::óćÍ{ë¶ Ô˛’÷ÔŐ«W±nÝ:„‡‡#..999°°°@×®]ńő×_ŁeË–2ËÎÍÍĹŽ;°wď^ÜĽyŻ^˝B˝zőŕěěŚŃŁGăÓO?…j‰ä_|<§Nť’ůž&^/ď}źźźŹŤ7b۶m¸˙>TUUáŕŕ€ńăÇcĆŚPSc/A"""Rlâ–Ąş2” .Ě45a(ÂLSşjjĐ-1~Ő,]ˇş2Ł”ělĽĚÉAzn.˛˛¤Ž””¤¤¤H®­‘‘š4i‚¦M›˘uëÖ5>öFŤ‡:tľQ…B´oß›7o†««+ĽĽĽŞflܸwîÜ——ďŔjĆşĄwíčŃŁ€ľuŰ⡀¬`ăرcřä“O`ee???ôčŃééé ÄĚ™3ń÷ßăěŮłhßľ˝Ôë1hĐ Üľ}‹/ĆÖ­[aee…¸¸8ěÜąS§NŦM›pôčQ™M뼽˝Ń§O™ÇT‹/Ćš5k0nÜ8ś:u ŞŞŞXľ|9ćĚ™»wďbË–-ĽaH!ŚܸqŁĚn#ő„BihŔP]ćšš .‘†F©)ŮŮH, 9˛˛‘—' 7BCC  p€ŃÎť;ĂÉÉ©Fş¤¨ÔtQňAALÜ˝ŕîÝ»ĄÖť>ź|ň ôőőaii‰5kÖâââ0lŘ0ŔĆĆß~űm©2ž>}ŠiÓ¦ˇAPWW‡ˇˇ!ÜÝÝqéŇ%™ű“÷ś‹×Yyu+ďţ  «« +++¤§§ăóĎ?‡‘‘śťťúQí‡őęŐC×®]«\–——rss±uëV¸»»CKK &&&5jÖŻ_ŹĚĚL,]şTę5ůůů>|8®^˝ŠĂ‡ĂŰŰöööPWW‡˝˝=–.]Š"44#FŚ(ŐĘB(âÖ­[8xđ`•ŹçÎť€ďż˙fff066ĆęŐ«»wďćÍBDDD #99űöí——|}}ńĎ?˙HÚŞŞh˘«‹®&&jcw++¸Ł‘ž % 8őőŃÉÄmlđ‘•:ÁZK Âb_čĹĆĆâŔX´h|||„ôôtĹ3ňňň@Pf‘XXFFRËwďŢŤ ==WŻ^ERRşté‚/żü‹-’lwíÚ5ÉT<ĆĆĆ(((@LL ž?ŽçĎźĂČČ 6DGGăÍ›7°¶¶ĆňĺËńÓO?áúőëPUUĹâĹ‹qáÂIąÄŔagg‡ű÷ďăćÍ›‹‹§§'ÂÂÂ`bb‡ ׉řlll°|ůrüř㏸˙>ŚŤŤáííŤĂ‡Ă××ß˙=îÝ»|ýő×8{ö¬¤Ś+W® uëÖ Ĺźţ‰WŻ^áĚ™3ŽŽ†››.^ĽXjňśsÉ:+«n+˛˙¨¨(¤§§ĂÁÁăĆŤCź>}‡óçĎ+ě5˘Ę‹ŤŤ…@ ŔĉK­›5k–T8VÜçź@€ŘŘXÄÇÇ#,, }űö­–.âţ|]şt)µnČ!Ř´i–,Y"µÜßßçÎť‡‡z÷î-łÜbčС8sćŚT:PQQÁ¤I“°dÉä%Ô•őňĺKI™bâß1[[[ŢtDDDTë.]ş„µk×bѢEĄ k--t02ÂGVVlcccŘę輷tÖeşB!é须™†Ű١ź…ščęB»ŘµóćÍĂďż˙Žű÷ď+fq÷î]¤§§ŁqăĆ044”ąÍŢ˝{Ž©Qňa˘^˝zظq# §§‡eË–víÚ%µíŐ«Wm۶•, /µL܇'66kÖ¬ŤŤ 6lAř˙V$™™™9s&ĚḚ̌}űvÔŻ_ćććŘĽy3ţüóO M›6•Ş“âÇŕëë‹úőëĂĘĘ ü1ŔÓÓ+W®”,›¸ĹCff&†Ž‚‚śšššĐŐŐUŘkD•gmm ggg–ZCCĂ2×9;;ĂÚÚÇŽCAA\]LäaooR­Ä´´´0uęT¸şşJ-·x=ztąeŹ3Fj{±śś,Y˛Ďž=Ăďż˙^Ąăo×®ťä÷S$aÆ 6l´µµ±aĂŢtDDDT+ŇÓÓqôčQ|ńĹرc‡Ô€âc¨Ť ş›™±ĺĹ{ĘHC.ĆĆlc~h©ŻŹzĹîĐĐP¬[·^^^2{ÔjQV“‚‚DEEá»ďľĂŠ+ŕää$i6-vđŕA¤¤¤ U«V’efff€ÔÔT©mĂÂÂ...Ą–É 8>űě3©pEܧ]<źżż?0bÄhkkK¶300@ăĆŤK•[âc;v,ŚŤŤĄŢ €ÂoŠ‹·RÉĚĚ,Ľ@EßĘîÚµ QQQ7n,--ĄĘźż¬€âmç\Vť•¬ŰĘîżm۶ř裏d^7E»FT5DLLŚTĘ…`Ú´i¸˙>˘ŁŁ%ëçh«Ş˘Ąľ>>˛˛’l}AĹŤV††p·˛B? 
4ĐŃ‘tEIIIÁŽ;*jÔHqĺĘŔü@ ůŁ˘˘‚úőëăĚ™3X»v-._ľ, *Ę’źźŹ§OꀤKBya†řˇXVŔŃ˝{w©×?yňĐ´iS…ł@·nÝJ‡şşz•”ĹÇĐŁG©ĺâ$łärńâc;tč€Ň-Y@Łh@–ěěě źóŰęLĽ¬˘ű—9~üř2E»FTő0€T ŚŔŔ@…BĚ™3jjjĄÖ‰ď©¬¬,ˇS§NRa_UŚ7GŽĄĄ%¦L™SSS¸¸¸ŔŰŰ7nÜůq÷·· P$~ßJHHąţ믿ƛ7o°qăĆJ{TT”dĚŚ[·naĐ Axúô)öîÝ dggKZ?Ő´   R!†©ş:şŤ™ĐĘĐ-0H®`Ł“‰ >˛¶F##I7q¨áăăSˇî'5Ú2ăęŐ«ČĎĎG^^rssńĹ_(ě'?cĆ ÉĂgqÇŽĂŔaeeˇPˇPGGGj­QüXü-iYĘâe%gVą~ý:HĄĽvíČś®ńůóçĄĘ­ń1těŘQć1”ulâ.âckÝşu©˛Ĺă[é¬Č9ż-\×me÷/ëdE˝FT5íÚµąąy©Ŕ˘cÇŽ033CűöíĄÖH–###ŁÚş 4×®]ĂDZqăF4lŘëׯ‡łł3†^޵—¸%ÔۦOÍĎĎ€RÓłŠcţüůXłf ŢĽySˇcމ‰««+.\¸€+V@UUŹ?–'ĂßߣFŤâMGDDD5*99k׮ŤBŚžffčmi Űžz“ę& UU4ŇÓĂ`©P#66ëÖ­Ăľ}űj'ĚHKKĂÝ»wˇ©©‰Ö­[KZd¨ŞŞbŢĽyPSSĂŞU«dľvóćÍ4hâăăqđŕAĽ~ýyyyřꫯJ=H?}úÉÉɰ°°‹ŹŹ/·śśś ,ŃÔČÎÎ#FŚŔµk×pâĉRŻ]»v-€Â) ;wî,!$$¤T!kÉňĆ~(Ůő@üúâEŠg(Ůb䯿ţ’YFEꤼcgąé®bÇŹP8îFEĎąĽń+*»YűQôkDŐcŕŔHMMEhh(®_żŽäädôęŐKf$''ăúőë AjjޤŰұcÇĐ A´hѢJaŠ}$aĹ’%KĐ˝{wL›6  Äůóçńé§źň†#""˘qńâE¬[·NŇى®.ú3Ä ÔĘĐý--aZôś oooÉ—ęď$Ěw1)Ů]@lÁ‚€•+W–Z'î&Đ Aɲ7nH¦ĺüŕ$ËĹýĹe˛ş/Čó ,žÎóŃŁG’e’ĄŞV%Ě»]|ŞV pJÓ}űöˇeË–TŤo@şşčŢ˝;ţůç {÷î’ß_ˇPnÝş!((gĎžE·nÝ §§'w###$''ăŔRËĹ-·zöě)Y–ššŠK—.•9…©¸ËF×®]Ą–÷ďß˝{÷ĆńăÇ%­}J:}ú4ţúë/|ňÉ'pss+÷544°lŮ2lÚ´Iî:Ź#>/044ÄńăÇńřńc<ţĽT÷"""˘ęžž.é"+Đłhv˘wÉĹŘXhÄĆĆb˙ţýď6Ě(9-kqââ%[glŢĽýű÷DzeËФI„‡‡cĘ”)X˝z5š5k†ž={ââĹ‹ żmŢĽ9vîÜ)™ ¤ĽoýK>XËZîéé ___ś? 6Ä‚ ŕĺĺ…gĎžÉ<§[·n•Đł2a†<ÇfooŹ‹/˘qăĆřđĂa``€#F k×®¸yó&š5kV©reŐ™¬ş­Žýżëk$ďőˇę5pŕ@„‡‡#44´Ô°˝zőÂĹ‹qőęUI“ŁGŹBWW÷­ÁŔĘ•+aff†U«Vaßľ}HKKCHHfĎž CCC©÷___8;;cíÚµX¸p!ž={‘H„ŘŘXüôÓOX°`LMM±nÝ:©}ěÝ»®®®5j–-[†'Ož ''Oź>…ŻŻ/Ś?ü;vě«>ĆŹ;;;äääȵýŞU« ¦¦†É“'#((iii¸˙>ćĚ™‘HKKKLź>ńńń\”¨<ÁÁÁ’®%m Ů­„j5а.ş˙BCCe¶¸ô$Qä IDATĽmŘţ÷ÜłgĎ`ooŹ6mÚH®Ĺüüü››‹Yłf±˘đńúÔŽÇŹŁQŁF Ą’ÝĂÄÝ—""" ŁŁ <˙ý÷[ËŽŤŤĹęŐ«qęÔ)ÄÄÄ ^˝zčŐ«|||$űKMMĹĎ?˙ŚĂ‡ăţýűHKKŽŽ5j„ţýűcÎś9eN ťźźŹ;v`Ďž=¸qă^ż~ CCC´iÓăĆŤĂČ‘#KŤ!ţ·¬·Ô}űöaäČ‘e®/éňĺËđőőEHH^˝zSSS¸ąąaÁ‚’0# ęęęxőęoşwhęÔ©§NýĐÖŐýťě3öYNř@áŔ˛âľŢ'GŽ‘ŚŃ5iÁúw¶ßđ‹'p#¤pZřÍ›7óBPťçĺĺ…””Ş«Łw9ť˝ Ůyyř+&ĐŁGÉçi15VQˇ & gĎžEăĆŤ%Ë·lŮĄŰËĚĚÄ/żü‚€€Vž^#^źÚÓ°aĂ2Ř[µjUjťxšSyX[[—Ůu¤$ńśŢŢŢ>L0&Lű5ĺ…đđđ»¬:ŕČ‘#e®?tčo4"""ŞVéééHIINąJ ACU¦ęęHĘÉAtttéĎ쬢B‹‹ĂŚ3Ť””lٲ?üđ:wî OOO©í===Ń«W/©©DIq®Ż‘üŠ?,Ö“sŕr˘šf®© ŚŚ,µŽ-3ŠřúúÂÄÄ;wîDóćÍ‘››‹† ÂËË _~ů%444¤¶˙ý÷ßYi |Ťx}*ç•HÄń2H!$Ťá" ĂŚ"***?>ćĎźĎĘŕ5"""""zo=NM…Łľ>+‚jUšH„¤rŃg7""""""’x•›‹űś5ŤjYč‹ĺ®gADDDDDDR®˝|‰G©©¬Ş!IIHĚÎ.wv3!"""""˘R.ÍnŇHOŹ•AďDv^®Ą¤ŕIFĆ[·U–qqq066VČ MLL„@ €……ﮨÖ/‘â°ŠŽJn€Â@ă\b"˛óňX1TŁR˛ł/ 2432`’Pćö f„‡‡Ú¶m«•zç΀“““rŢ))8tčPµ˝ľŞőQÝĺý{÷—eő˙qüuł÷Ţ (‚ŠÍ™ĺJMëgšmWe¦f¦e*V–_WnMMËUŽ4m¸3sĄ™g‚‘˝do~ÜÜ·Ür ŕçůx|ß<\ç\ç:׍r˝9ç\B!ŁŚtĽ®^EżhšDf&Ű## IK“Á\v~>gŮCr^IÉx_E·ŚMÂŚrčŇĄ ………ěÝ»·Ć}0ňóóéÜą3‡~`ő«2ş=ńä4h …‚%K–”z̲eËP( 4HŁ\ˇP P(řâ‹/î{žĹ‹«Źż·ľB!Dme’™IĂ˙®`‘” @na!˙ŢľÍďjb\LNf{d$ÁĹögq ŹŔ3$˝‚‚2ëW«0Ăßß_îč¶lŮ2.]şDëÖ­Ký‡Ýžx˛?Ű 4ŕ“O>áÂ… %ľÄÇŚ——Ë–-ÓÚĆĘ•+ÉÍÍ-ő………,]şT[!„O$˝‚>žŽ;2aÂ4ŽŤŠŠ"::[[[ęÖ­[áúaaa¤§§ăĺĺĹ AčŃŁQQQlÚ´‰””ÜÜÜ:u* .$(([[[&OžĚďż˙ÎôéÓ™7oW®\ÁŇŇ’‰'rčĐ!uŰŃŃŃDGGccc§§§ň&„‡“’’‚««+S§NeѢE˘««Ë”)S8zô¨şţÖ­[éÓ§îîîqţüy˘˘˘9r$§OźĆÎÎ//Ż ŤďäÉ“yçťwđőő%88[·nŃ A&Nś¨ž6öěY"##°µµĄ°°rŹmiőµŤGUúŁ­˝ăÇŹăďďONNGŽ!22’6mÚ0iŇ$>űěł Ť¨Ý|}}Őßż~řˇşüłĎ>ăěٳ̜9łÔ_řůůńÍ7ß”Úţ7ß|CóćÍ100ÁB!ÄĎ61±D¨‘[XČÍŚ öĆİ;2’3‰‰lŤăx| žb~áÂĚË/żŚ‘‘‘zĆCdd$Ó§OÇĂĂ^|ńEFŽÉ´iÓÔĺ}űöŕźţ)łçĎźW·;sćLÜÜÜđôôT×?qâ™™™Ś=Ö®]‹‡‡ŽŽŽ¬X±‚M›6QXXźź_…ÇW5í}úôéŘÚÚbooĎĽy󰲲R÷ŕÔ©SU[mőµŤGUúso{ąąąĽőÖ[ŘÚÚ˛aĂ4h€ŤŤ łfÍÂÜÜśăÇŹWřĽ˘v5jýúőăűďżgëÖ­9r„9sćĐłgOĆŤWj˝ÜÜ\ Ŕ±cÇ8wî\‰ŻßĽy“Ý»w3pŕ@233e …B!î 5<±N¸­.OÎË#85•˝11üÁ™ÄDbĺç¨'Jv~~‰ăfF†z†N^>v±±ř\ĽTéCEďa\ŔÖ­[K”988ZlcŹâłĹ{Z‘ú÷>żđ %ĘUÇ*ééĘ{饗°±±Q—«Xttîf<§Oź.ő|đŕÁX[[«ËUަ¤˙úëŻÄĆĆňÁ`bb˘>ÎŇŇoooÎť;W©MOÍĚĚHIIářńăôěŮ€úőë“””¤qśŞď•[mőµŤGUúso{7näĆŤ`dd¤>ÎĹĹ…”””JŤ¨ýV­ZĹŮłgy÷Ýw177ÇŢŢžuëÖ•ąIg^^oĽń,]ş”ďľűNăëŞ}6Ţzë-¦L™",„Bq‹´t,ŇŇq 'ŃÎŽD[˛Šž{Ňóó NM%85}…G## q31ÁL__݉ÍĚ$.;›đôtőŰHJ|V’’±LNĆ61ńť÷‘lZPP@hh(€z)¶ ˘2ő‹·1dČ­ĎĎ>ű¬Fůµk×´–«fr4lذ̰EŐnçÎť5ę߼ySŁľęíO?ýt‰>«¦­W&Ě?~<}űöĺ­·ŢâČ‘#ZŹÓTdlµŐ×6UéĎ˝í©^ŮÚ©S§6˘öł¶¶fÍš5$''ÎüůóŐ!]YÜÝÝyöŮgٸqŁF–™™ÉęŐ«éŇĄ îîî2ŔB!„eĐ+(Ŕ!.ŽFW‚đąx 
—đŚŠďgVXHDf&g““ŮĹĎaa‰‹ăbr˛ĚܨaT3/.&'ł?&†Ť·nńW\ďÜŃ2tňň±N¸MÝë7hxĎdŔCš™±sçNľýö[Ξ=K||<ęß6oŢ\kQü·"ő‹·ŃµkW­ĺm۶Ő( ŕ©§žŇZ^|釶ţ©ĘJ«ß˛eK@ąO@Ó¦MKô9::úľACiĆŤ‡‡‡_}őëׯgýúő<óĚ3lŢĽYăNsHEĆV[}măQ•ţÜŰžęFŤ=°qOŐ¬#Ö¬YĂkŻ˝V®×§2„°zőju@¶qăF:t¨ l-”žžNDDDŤě{DDDĄ^ lll,ÁśBGÂ0'‡¸8ââČÓŃ!ÍÜś;VV¤™›‘kh¨nD 2¬ôôp46ĆÚŔS]]ŤŤe0«Ap‘ś“C\v6iąąÄeg“^ĆŰkŚ220KMĹ"9ąJËG[±bĹ FŚA«V­Řşu+ľľľŔŚ3hѢ…úXŐćźöööę˛*RżxÎÎθ¸¸h„ŃŃŃ888¨ßšQüxGGG\]]K”ŰÚÚâááˇŃ†ťť]‰˛{ëGFF‹‹‹ NNNęuęÔŃčs\\áááíVT˙ţýéßż?GŹeěر:taƱsçNőąprrR_EĆV[}măQ•ţhk/&&@=†UńdرcË–-ăí·ßĆÎÎŽŮłgłxńbŤMAËú Ť5ŠĺË—3nÜ8 K—.ĹŇŇ’~ýúÉŕÖBË—/WĎĐ«i~ţůçJ× @C!Ä#ĄWP€Őť;XÝąŁ|860ŕŽ•™ĆĆá(÷ŰHľgŮ»©®.ÖX`ĄŻŹ©ž6ĹęZ¤ç瓞—GlVI99ę}.î^Ą¦a–šŠ^AÁŁý|=čçÎť Ŕ?ü@ăĆŤŐ媍‹?0k[bR‘úĹ۸w#Mm{;”vÎŇʵµqżv‹÷#ż(µş÷MŞWAVf‰É˝:věČöíŰquue˙ţýllË;U鏶ö,,,H¬ÄôŁŇÎ+jżččh† F˝zőX°`ěÜą“‰'ŇŁG|||ʬobbÂË/żĚęŐ«Ů»w/–––ňŢ{ďa,ż¨•Ä}µup}dýuőh€ľą9Y•nĂČČHcď&!„âqPÍÚPQÍÜČ41&ŐĚŚ ŤăÓóóIżg€ľBˇžÁa¦ŻŹ•ľ>::X`X´ˇĐ”–›Kz^ž:°HËÍ%=?_ăí"eŃĎÎĆ8#ăĚ LSSÉĚ‹Gf¨–OÔ«WO]vîÜ9őëJ›5kVćnEęW4ś¨hyYKLĘfxyyqéŇ%nܸˇ^6‘‘‘ˇ*f|úé§l۶Ť;v¨Ň Š0GGGőqŞ7Ľ(*2¶ÚękŹŞôG[{Íš5ăđáĂܸqC¸;vŚđâ‹/ňí·ßV輢v+,,dĐ A$&&ňË/ż`nnŔşuëh۶-o˝őÇŹG˙>›L 2„Ő«Włyófő®,1©˝† ÂĽyóÔŻ‹¶¶sá©g_ŞPadlúHűÉő˙NŞŚńăÇcgg'7_!DµR|ć†sQY¶Ůú¤››“ilBގzSQ•ÜÂÂ2ÂMuu1ŐÓĂ@Gë˘_,« V…ŞĐM›•U˘¬ĽLRR0ĚÉĹ83ŁŚtL22ů¬‹Çftîܙݻwł|ůrŢ˙}ţţűoBCCŃÓÓ#77WkPüa¶"őK tqż˛Ňú1bÄFŹM@@+V¬ >>žyóćaggGHHĆuwďŢť+W®Üwwaa!7nÜ࣏>bíÚµčëëóé§źđŃG•8>66–´´4ĚĚĚ*<¶÷Ö×víU鏶ö&NśČáÇ™6mË–-#44”aÆ‘¨ń¦šňž·Ľă*j¦ąsç˛˙~&L ±il«V­}:ëׯ'<< Ľ˝˝>|8 RwóćMúôéõk×hѢ§NťŞĐŘj«ŻíÚ«ŇźŇĆr×®]Lś8‘«WŻbaaA›6mřěłĎ46r-ďyË;®˘ć9{ö,íÚµŁQŁFś:uŞÄr®ÜÜ\Ú´iĂĹ‹9zô¨ĆçGµbńżţ¦M›ĆçźŔ×_Í'ź|RęńÚꋚ'==]c††Wă6Ő>Đ(OqôŹŤdZgűöíěÚµ €·?^üČÎ{ćŘnÎWľťnĹŠr#D­Ä‚ đ ®ËĘ+ĂŘ<]˛LLÉ×Ő!ÇŔle qďŇ•'…IJŠň˙33ŃÍĎÇ8#ťüĽXD;;[´7ć˝?đ0C”íÖ­[Ô­[???őĚPnH———Ç| ôɸÖއP???BCC9uę”Ö·\ĽxÜÝÝ9w¦Ą†aaaÔ«W…BADD„Ć&´Ą…Ą‘żV%Đ C 3„0Łz€:ô4‚€=˝ËZŞK0ˇbž–¦ţoěôs”ł9Şër‡fčÉ·íĂ1tčPöíŰǡC‡đööV—Ż\ą€7߼űĂrff&ß|ó űöí“{€d\k/SSS‚ď{\łfÍČÖ2UO[Řŕîî®Ţ´÷~ÇKXQ»>KĹ—ś¨–gÔ´@C‚ !„â>@±ĺ&• jRĚĘż_V®!9†ĘYæ÷ĽˇĄ,5mÖÄă&aĆCâääDTTŁFŤbŐŞUšš˛uëVćĎźOűöí9r¤úŘ‘#GŇ­[7ŤW˝ŠŞ“qB”GM4$ČB!ľŠ é2`Ź€„ÉôéÓ±łłSż5//OOO&MšÄ„ 0,¶QÍš5kdŔW!DyŐÔ@C‚ !„B<©$ĚxHttt?~<ăÇŹ—ÁB ¦d!„â‰~ć–!B!”T†jyÚő˙Nrx÷új×O 2„Bń¤“0C!„(¦şd!„BH!„B”P] 2„B!”$ĚB!´¨n†B!„wIQAqqq( śśśŞE˘˘˘P(ŘÚÚĘ8 !ÄV] 2„B!4IQA—.]Ŕ××WŁ<11‘ß~űí‘÷çĚ™3´jŐŞFŚ“BÔ4Ź;Đ C!„˘$ 3*¨K—.˛wď^uY~~>ť;wćđáĂŹĽ?Ő5ĚĐ6NBQS=®@C‚ !„Bí$Ěx–-[ĆĄK—hÝşő#?·*Ěđ÷÷—!„ŃŁ4$ČB!„(ÝC 3®\ąÂkŻ˝†““ĆĆĆ4jÔYłf‘——§qÜž={xîąç°¶¶ĆČČ&Mš0gÎrss5ŽËĚĚDˇP`mmMdd$/żü2–––XZZ2f̲˛˛JôˇĽmW¤ż...( BBBđóóCˇP0fĚŢxă Q(4hĐ Ä9’’’°°°ŔĘĘŠäää_ŻĚ5j›™Q‘ë¶´´ÄĘĘŠú÷Ą%ÎÎÎLź>˝JǧĘÔß˝{7m۶ĹŘŘ'''ĆŤGnn.mÚ´AˇPpýúuůîBÔÚ@C‚ !„BÇf?~rrr8rä‘‘‘´iÓ†I“&ńŮgź©ŹŰ°aĎ?˙<éééś:uŠřřx:věČ„ ĐhóćÍ›ęęf͚ŗ_~IXX/żü2K–,)ń@\‘¶ËŰßččh˘ŁŁ±±±ÁÓÓ“łgĎ €­­-………„……ˇ««ËÍ›7K!K—.%55•±cÇbeeUbÜ*zŤQQQDGGckkKÝşu+|Ýááᤤ¤ŕęęĘÔ©SY´hčęę2eĘŽ=Z©cď§ŠÖßşu+}úôÁÝÝť   Îź?OTT#GŽäôéÓŘŮŮáĺĺ%ß˝BZhH!„BńÂŚÜÜ\Ţzë-lmmٰa 4ŔĆƆYłfannÎńăÇŐÇţúëŻXYY±lŮ2ĽĽĽ077ç‹/ľŕÇÔh÷ęŐ«čëë3{öl7nŚĄĄĄú}Ó¦MÇ—·íŠôWŰ,S§Ni”âééI^^ˇˇˇęă233YĽx1–––Ś;VëŘUôµő§"czţüy"##™9s&nnnxzzŇ·o_Nś8Q©cµő«Ľő333=z4¬]»Y±b›6m˘°°???ůÎBÔĘ@C‚ !„BÇflܸ‘7n0hĐ ŚŚŚÔĺ...¤¤¤pčĐ!uŮÖ­[ILL¤yóćę2RSS5Ú˝páď˝÷&&&ęr777ÂÂÂ4Ž/oŰéďéÓ§K<¤«ĘŠďYѨQ#®]»¦.[µjńńń|řá‡ZgeTćµí—Q‘1UŐ}pqqA__}}}u˝â3 Š? 
7iŇDŁ<<<¸;{ˇ˘mW¤że=¤“É˝aĆO?ýDhhh™ł2*sŤÚúS™1}ę©§4Ęhٲe•Ž-ŢŻňÖ?{ö,M›6-1>ŃŃŃ%ÚBšhH!„BQ ÂŚśśśĘnĹŠôęŐ‹/ľř‚ pć̆Ό3đńńˇK—.;vLăˇwÁ‚řűűăďďO˝zőřďż˙Řşu+oľů¦F*Ňvyű«-<2dŤ7ć‡~ŕŮgź-f€r˸qăî;n˝FmAeĆ´xpż€˘"Çj›™Qžú#GŽdúôéüý÷ßxzzňńÇ3iŇ$nÝş@›6m4Ú¸páB‰ŤM«ú5!„xX†B!„†˘°°°°:w0&&gggŤŤ­1۵kW8Ŕ¬YłÔpÖ¶k|TnÝşEÝşuńóóS BQ¤§§3oŢ<"##5Ę%Ȣb¶oßή]»hŮľ×#;otŘ5b"®Ę_ Q›±`Á<±HK—AŹ]´ł±..Z˙Ö«îť/móČę쯿ţâŔ4oŢĽ\ł2jâ5> C‡eßľ}:tooouůĘ•+JĚNBęN5CŁx !A†§ÚÔ đź=2 B!ü2“­¬˝*Ş›ÜÜ\Nź>Í!C°¶¶f۶mčëëתk|śśśŠŠbÔ¨Q„‡‡“ČĘ•+™?>íŰ·gäČ‘ň+„¨qT†———BT’ŻŻ/FFFŹíü÷.'BńřUű™_s…» IDATĄí·PůřřAçÎťY¶lőëׯu×ř0Mź>;;;~řá7nL^^žžžLš4‰ &`hh(ß±BÉÔÔ”O>ůDBJ˛łłcѢE2B!ÔŞýžB!„B!.Ů3CTGe홡#Ă#„B!„BšD !„B!„BÔ(f!„B!„˘F‘0C!„B!„5Š„B!„B!„¨Q$ĚB!„B!DŤ˘'C „Bńŕ„……qőęU®^˝JFF† x˘řúúŇ­[7ěرc?~śâîî."f!„B<AAAěÜą“k×®É`'Öµk×$Ěx¶lŮBVVóćÍcüřńh„B!ĘFff¦ „¨6lřŘνcÇvîÜ©Qfîŕ!7E<1˛Ó’Éɸ#ńdee©˙_ !”$ĚBˇŐöíŰٵk— „¨1\]]ůüóĎůyúé'<€®ľ!N>ípöi‹ľ‘‰ÜńÄ <@ä…C2Ź€B(É B!´ –A5Jddä#?g`` :Č0±v¤yß‘¸·ě"A†⡰¶wîaaa2(â‰%33„B”I?% ë Ó2˘ÚJw­Oş«çc9÷–-[匌Ć=†J!„x¨<ĽšÓÄďŽţ±Qfh'ž„B!ʤČÍĆ89^BT[Y6ŽŹĺĽÇŽ#11Qů€Ńş—BG˘ał¶h'ž,3B!„¨„sçÎ``b‰Ł·ź â‘iج-ź{%'âÉ%a†B!D%\˝zk÷F2BGN ń¤“e&B!„• zU˘ž± F‘ä„XömYÉ•3GąAFęL-¬đhŘ‚./ Á·C÷uŇR’¸zî_üžî%(DÉ’ń$“™B!„˘Ę"n\áłAĎręŔvúůŻ>É’=A ˙|)1a×Y4á-ţÜňťFť‚ü|fŤęGpŕq@!*Ifh'•„B!„˘Ę6, íN"Ă&-ŔŻSOŚLL165ŁI›Î ˙b)ÖöÎDÝş¦QçŔŻk‰ ˘žŹŻ  U †xÉ2!„BQe7.ťŔłqÉÍP˝šú3˙·@őź§íέ«Ő^ńĺH¶,›ĆüßIg×ú%\:q¤ř ŤM¨ßÄźľCĆâݬµşNaa!ďwŻŹ±©9ł~:ÎĆEźqćĐ.lťÜřrí~.˙‹?·|GČ•@r˛łppő cŻWčţĘpôôô5úwář~~_3źđk˙alfNŰî/ńňČ)ĚŃ—›WÎ1kóqÝę‘–ÂîőßpúĐNnÇFbhd‚wóÖô4–úMd#Xńx %'B !„B!ĘÍŘĚśÜÄ,BĎÓĐ·]™ÇN]ó'Iń1Śű?_Ě,mX˛ű?Bţ dîŘŘ9»3bę·¸yůÂĘŻF3{ôK|şdŢÍŰp;6‚ěĚ <4ç»iđT·yóŁéäççsüŹm|7m4^ÍZóů÷{±°¶góŇ/ٲl©É·8ęsu_NÜÁňφă˙lŢ˙júFl\8…çN$4č<ćV¶ę #.ňs>@NV&?ť‹Ź_b®łâËQĚů"/ÜLŁ–íĺĂP Ě›7Oýşc!†÷#ËL„B!D•uů}:_VÎ""$¨Ěăo)_m[·asr˛3Y6ĺ] 7o#ő›¶ÂĐČw寮6ć+ňórůmő\uýëWżńz ¤u—Đ74ÂČÄ”łGö`bfÉ[ăgáčVcS3^:€ţŘŞn#';“őó'cnmÇŰ‹°sŞĄŤ='ĚáÄţ_),,ÄŁA3ňóňX6ĺ˘Ă5ý{ü:őÄŘÔśz>-yuôTe˙ľź#„J:wîś0Đ%'âI 33„B!D•=˙Övţ°ë˛cÝBś=ĽéŘűş˝ü6†šo} :Ż 3µP† {·r;6‚®ý‡beç¨ql˝˘cB.ź˝[?ř‚: iŮé9ŤăGM˙ľD˙,¬í”wi겳‡÷’O·ochd˘.71łŔŃÍ“°k—đ( [NŘέ«iä×-Új´]żi+n^9'„JĘČČP˙·o»ž2 Ąhâ׹\ÇÉ !a†B!„ĺ P(č3čCşĽ4”3‡wsúŕ.ź<ĚĎË˙ÇÁßÖ1yů¬íťJ 3ÎŮ @óvÝJţŔj`@^nN±0CYżcďWîŰ·‚‚b°wńP—_˘Z‘@CÔff!„B‡ÂÚމw1ö…ćW—'D‡“š|K[l\H»ŁÜ/Aµ¤¸°k—€»Ë9’âcHNĹĘÎQc¶Ŕˇß~`Ýś ÔmÔ‚‘˙űwď&™°mĹLvţ°:^wĂ Ől WŤ6R’HŚŤÄÜĘ;§:ýűv†Ć&rsEŤ!†¨­dP!„BQiß~1‚ű6#>Jű†Ć¦ŔÝĄ"pw‰jóOcS @s)‰Ę…öĐŞłrůjF…jsÎâölZŔ;Săݬµz/Śë—NPÇűîĚŹ‚|ĺ˛]}ÍWµž>´SŮ~±ţééő_!÷\Ô<˛)¨¨Ť$ĚB!„•¦§o@Jb<˙ţů‹ÖŻźý{M[ßݸ0ĽčM$Ĺ—”4öď@Pŕ?őoÇFrňŻí¸z6ÂŻS/ŕîćź [”8ߝ۱ػÜý­sصK\»p7Ouą[]â#CŐeŮYěݨ DЇ-ő›´*jë˛Ćů.˙‹IŻu`űÚňaŐš˘¶‘0C!DŤ4ůfŞÖ˙Mşq‡Źońú†ť4đ(üśU©S™6d„xĐ^:s+[¶Ż]ŔŢMËąAnN6·c#Ůżu5ëçMÂÖŃŤŁ>/Q7%1ž¬ŚtŽü#S~Z2•Đ óädgróĘ9M„‘‰)#§­DGW·(Ě8_f”ś™ŃĐ·]GNv&—N"äż@tuK®®~ö˙°mĹ,R“oz•Ť ¦`neŁ 3Ý Kú˝3=}6, üúdgfpöャüj4·c"qײׇhńđČžB!j…ŽĆV6Ômß™şí;ÓđąŘ:âu óóep„x\ëňĺÚýěް”ĂŰ7đËĘŮäçalf“{}zż9š®ý‡alj®®Ó±÷+ś>´c{&ěú|±j/vÎuĽ|ŰVÎäë_&'+gš·ëFßÁc±´uP×/k™Éŕ sX;ű~[5‡}›Wňlż!ô4†ś¬Lo_Ď×c0tâ<Ľ›·ˇËKCČLOaßć•Lxů)´xŠ7>šÎô}đlÜRÝný¦­řvŰ×ĚgöýÉJOĹĚĘßÝéůÚű¸Ő÷‘¨1”ÜCC 3„BGhF=sŤ?ë`áě†Ďó/Ńaô'xwëMëÁ#8ąziµěݵµ˝3oŚý_ąŹ·wqgúú#%Ęëx5fě×?Ţ·ţ‚ßĎ—ú5ĆÍŰP˘ĽÇ+ĂéńĘpŤ2Őëdű úP]–NJb< ›kĽ=”35ĆĚ^'7\ÔxÚŤ—^zIFÔ(˛ĚD!D­’ź“CŇ­ţY6—ýÓ&Ь˙ë20B «¦ČG/ú˘Q~ř÷ő´{®ż ’¨Őî]r˛uëV e`DŤ!33„BÔZÁě ×ŚĹŘzz—řšj݉ŇfJÜďëu;‚„kA|׫-Ý?›ŤĎó/a`bÂś&Nň!5–Ą­É 1ü8oC'ÍÇĐŘ„ÓwňÇO+đjÖš.ý†Č ‰'"Đĺ Ťś囄$Đf!„Ź™BW9±ŕî—Q—‡w·Ţ řv#Š˘Íě6¦ËÄi¸úúłíý7+öĂdĎyé›uíéŕĐȇFMhůęÖüß3¤ĆD•¨›ź“ŤG»§yuíŻč{őĄSS_^\´šĽś‚÷ţ@óođü¬Ąç1µłÇ»űóxwëÍS?ćĚ+5ÚĎËĘ@ßŘvĂÇâ?ř=ů`‰ZáĄá“0ł´áź˝?3ĺͧÉĎËÇŢŐťç}@ď×Gˇo`($ž¸@CˇPPXXH®ž<& 3„BǦA÷>Ä]ąřŔÚ,,(ŕą©s9·y'W/%éVF–V4}ńşNžNĂž/âőěs\?řGąŰ|ćăĎPčęrüŰůśÝ°ŠÔŘhôŤŚ©Ó¦Ď}9KWw:ü9;?ˇ%Ěȡ÷Ě%ś˙ůŽ-ťKz|,6őĽč=s n­ÚŇzČ‚÷ţŽ•{=zM_Ŕ?Ëćr~ËŹ¤ÄDbfďD“^ć鱓éţŮln=Hbȵ»íçć…¦ř˝ů»'}Ŕĺß·<Đ·Äń8čččĐóµ÷éůÚű2â‰gëੵą9Y( ňud7Qţ—!BQ«ţaÓÓĂŞN]žzw ]'O pÓšÖľ®'Ůđ!·o\Ą /ŹŚŰ ś\˝”żS†Mţď• µiU§Ç–ÎĺNDąąd§¦pýŻ=ü:z0Yw’0ł×ľ¤ĂŔĚśđS˙°wĘG¤FGR—Gµ ţš1›˘%6ţ†Łk`Čáů˙ăĐś/IşB~v6w"nńϲąü˝x:zz´|m¨ć 
Ц›ŘÚqußNÎý´–ÜĚ r‹^§)„˘fK g÷ć%äćdýµ_QŃ’!Ş3™™!„˘FSíAQšs›×qń—Môśg~üNků•ťżĐnÄ8ś›µ¬P{·o\ĹÁ§)˝g.a˙´‰¤ĹŨżuî4ó}ÝˬjͲeńW˙ŔÔĆŹöť¸řËF­m\úm3ťÇ†ÇSK=ĎĄí[ä'„µ¶ CQŤfŢ˝úT‰˛Âü|˛SSIŤŽ&ćü®ýń±/VËńUőąků°=f!„¨] ÉJI&úâ97®&hĎoüqA—´–'†ŢŔ̡bcîüdŻoŘAă>ýńéÝŹ¨ó§ ýç0ˇÇvň…÷ŮóăöŤk%ĘrŇ”!Źj +7e ňÁńŕ2۲ň¨Wę×’nŢĎ—BÔ÷Ý»wçĎ?˙¬öýVčębde…‘•ö>>4{ő"OźćŔÔ©¤ĹÄVąýfŻ ¤ă'źHQH!„˘F+ím#“*(¸WnfúFĆj/ćňyVt÷§Í°Ńř<ß×–mpmنŁ>!->–#ó¦qnóşRëçegÝ÷&fĺꋡiéă™™ś(¸&9!–Ź^lˇ1Ë˙ĽŽN±Í_k“”¤>ěÓ {íx<żˇMŠŹaÜ˙ůbjaÍ7{®Č‡OÔ¨ cĐ AŘÚÚVŰ0Łx° ĐŐĹĐÜ[//ę=Ó™F}űâęďĎKkÖđˡ¤ĹV-а÷ń‘„B!DÍ¤ŁŻ_ö?žFĆZ÷ŚĐ76 §űI¤ÇÇqpöçśý9ÖuëăůtW|z÷Ăý©Žôžő şĄ.o)ŹśŚ4 Í-™×ÂŤě”;r“źˇÁ¨ăŐři)I\=÷/~O÷z,×uďů#C‚p÷núŘĆúVŃX×mŘ\>x˘Ć:t ((¨Fôż0?ź¬äd"Oź&ňôiέ_OŻyó°kŘî3¦óëŰďT-Ěh$a†„B!Dµţi¨ t ÉĎÎÖř’]ýeVµónDôů3%ĘU›m¦DGV©kIˇ78z3?¬¤ĺkCé5c1mŢ]Ą0#)4§f-±őô&ęÜią˙O[ÁçđhŘěµYźĎ¬Qýhâ˙ôc 3´ťß§UGÖ‹y¬c­ Žę6’0CÔĽ Ł&K‹‰eçy}ŰVśZ´Ŕ˝}{ÂţůGýu#++ZŚG‡;;ŁŁ§GFÂm˘Îžáěšµ$ÝĽ @Ë!i;z´şžjż‹?'p}ßľ µĄŤ[›6ř ‚]Æčr'<‚ŕÝ»ą°qyšËI+zÇfÍhţúk85kމ­ yŮ٤ĆÄpóŕA.nŢBVrr‰ţ86mJ‹7ßÄŮ×#+K˛SR˝x‰ó7ućLŤ¸÷ň6!„O¤ě˘Ą"NŤ[”řZŰ÷>*ł®ßëĂ´–7é;€¨ó ^\¸Š1'®áâë_âk—wlŔÜÉĄJ×{óŘAĺµ˝;Fë×=;wcÄ@:Ź˙L>µú»a‹Öć_×D=ßÇrMŹűüĄŹőů˘0Ł…|đ„ŹXćíŰ\üiłňßł.]ÔĺfŽŽ ܸß7ßÄş^=ôŚŚĐŃÓĂĚÉ‘˝{3ŕDZoܸ\ç¨l[yůÔ}úiú,YŚkëÖZX khŤW}ÚŤů€î3fTé<ž]şĐďűďđęŢ3'Gtôő103ĂÖË ˙wße঍:8hśŁaź>ô[ő=ő»uĹÄÎ==Śml¨Űůi^üv9M¬÷]ff!„x"Ĺ_ĆÍżÝ>›ÉžÉc¸r ;ÚŹü7ż6dÝIÂČŇZË%yÔmß™®38żů’#najgOĂç^ Í°Q{ŠB™˙·x űľřđ3˙’“ž†…ł+íF(•ř ËUşŢłVá?xŤz÷ăĹ…«8˛pw"Ă0±˛ˇAŹľt™řfćš[Ę‡Łš»pü/ţÜň!WÉÉÎÂÁŐŽ˝^ˇű+ĂŃÓÓ\"u+Xą„ŤŁ+ßN}ź‹˙  ?źö=đĘč/00Ľ»żË‘9şű'˘o]''+;ç:tč5^oŚBˇP0uhwn]˝»ĹŠ/G˛eŮ4ćýz–÷»×ÇŘÔśY?gă˘Ď8sh¶NnôoRąűz•íkćsĺě12ÓR±urŁcďWčůÚűčęé•zţůżňŃ‹-HNeö–8¸z(ܢĂٵ~ —N$)>Ccę7ń§ď±x7ÓÜŘodoP(ąń(?ΛħŹ``dL×ţĂč;xląîË--ÁQyîUDHź˝ő Žu<™őÓ?m¦§$óq::ĚÝz sK2ŇRŘ˝ţNÚÉíŘH ŤLđnŢš>ĆRż‰źşnaaa©÷ĺ˵űĺI‚ŚZd¨Ü<|V-łďÝ Ó˙Ýw0up îŇeŽÎ›Çíë×°kĐ€ŽŹÇŢLJ¶ŁF±cÔ(×®#píşRß@R‘¶4ĐiÂ'\ůýwÎoÜHJD†ćx÷ęEűÇŕŮĄ ;pëč±Jťç©‘ďŁĐŐ%pí:.oŰFz|‚Iדő÷eüŢx‡Ľě,ţš9ĄJ×{'"ŚťźŚ  7—&/äýçx5‘1'ŻÓó 003'ćŇ9ĎűJ>ŐŘń?¶±đ“7ÉÎĘŕóď÷˛xçeĽ›?Ĺ–eÓřeĹLÍ{žOR|4z†ś>´“ŢoŚfÎÖS´éú"~YËŽµ ŐÇnývkfŤĂÝ» 37eζS8Öńäçĺ˙ă·ďç0uÍźĚ˙íf–6¬9ĂüßąAvf®őřnÚ4mÓ™żźŁk˙aĺîëőK§ůňíçČËÍeŇŇß˙{ žŤ[˛őŰéüúýě2ĎźśKrB,¦Öę #äż@>Ü…—Î0bę·,ý#O—l#1.’ŮŁ_âÚ…“ęsߎŤ$3=k{'~[=—×ÇNcęšýčččňËĘY\=âľ÷%)>†ä„XĚ,m°s®Sˇ{ĺčV]]˘ÂČĎËÓh÷Ż_Ö•‘FŹĂ11·$.ň_ éĘß;72pÔç,Ţy‰ńó7}ë:3GľHPŕ?Ĺ®Kű}™´ěwůF’ ŁÖ)ŰÚ¨Ë,ÜÜČMO篩_{ń"y™™äefsţ<żš¦ülVľ˝v*Ű–Žľ>1.pxĆL’CoQ—OfR6näÜŹëđîŮ«Ňç±puŕĚš5¤FGS—GNZ:·ţţ›}“&‘ť’‚‰ťťúř¦/D×Ŕ€“ß®ŕÄŇe¤DDź“CjTg׬áôwߎ٧Kă~ýŞý=—0C!ÄéżŰŘ1~8±W.’—ťEVJ2·Žaă[/zě9™ĘM<őŠŢL˘gh¤ 322¸qřO6ľŮ—›G’u'‰Ľě,â®\bß—ź°ëÓQ• V=ßžăßÎ'áz9i©äçäp'2Ś Ű6°şo'ÂN­ň5_Ůő+«útŕ¶ ¤D†“ź“CnF:1ůkFëúw%;5E>ŐŘŮ#{01łä­ńłpt«‡±©/Ŕ?lŐ8Vµ_†ˇýß›„»wLĚ,č3čCNţu÷ˇöŔ/kxiř$Ě,m°°¶ăŐ¦bbn©^>p3H&ßä2âşňÍá7ţŁCŻ´îňú†F\ü÷@ąúš——Ëw_ŤĆĚ҆÷ľXŠ“{}Ě,¬0"#3®_:SćůCďŮx3';“eSŢĄ°°qó6Rżi+ ŤLp÷nĘkcľ"?/—ßVĎU׿ţź:đŢdl\ppőŔ·CŹ˘`äě}ď‹¶Í?Ë{Żô ±wń ??Ź„puyNv&űţc3 z |—üĽ<–My‡„čpFM˙żN=165§žOK^=Uy]EÁSY÷ĹČÄTľ‘$Ȩ•ת~ŁńÝgŰGĽĎ÷ťź!9´ä Äĺń&&ĺjż*m]úy«Öňëű•oŹqhěSéó$…†đLŔdŤĐ îŇeVwéĘÎѨËÜZ+—´ďÚĄµOW÷ěŔĄ•_µżç˛ĚD!DŤô ^Ézń—MĄ. 
Yѵ•ĆźsŇÓ4Îvâhą†{űYZżÓâb88ű Îţ⍶ŻÇ_˝ÂÎŹG<ň±ƨéß—(ł°Vţđš•‘¦QZ´Ä¤]ŹţYÜ]2eíŕ (g$¨›’™žĘŤK§iÖVąŢÜÁµ.K÷k¶Tr_âaBËNĎU¸Ż˙îű…¸ČPú‹~Qh`mďÄň?Żß˙üę2eđĎŢ­ÜŽŤ k˙ˇXŮ9jÔŻWT/äň١O‡^1µ°ş{°B€ŽÎýßswóĎ•şWÎ^Ƈ‚Ł[=ŽěŘDjňm^:sKţÝ÷ ·®^¤‘_´h«Qż~SĺßW7Żś»ď}© ˘Â®É7»†FŘ9Ö‘ نćdßŃ|c—‘•%M_[›Ö9:blmŤŽžŠJĽá©˛mÝľ¦ý3}'L`ŢBTä<ľü’–-Ă«GęwëFÜĺËDž:EÄÉSDRŻąą¨ą‹r®A»w•Ůg 77 3„B!ÄĂQPP ţmľ˝‹‡Ć×Tł·~ZŁ­:UŞŻŞ6ť=ĽĘfh;ż*̸uUůç:^MJÔOŠW>äXŰ;—¨ďŮDsJµjłQŹ÷­í--33*rŻ\ę*_®śB~âŻßHWĎĘ(şü´d*?-™ŞýaÎÉíľ÷Ąşňőőe×®]ňM~ŮŮ™d”Âł«rVYń׊¶3S{{RŁŁ9±t)ŃçΓuçąąäçóţ©“ĺnż*mé’—YňŢé-aÍĚŞŇy2oßćß%KřwÉ,ëÔˇN»vÔďÖ??ž™2]}.ýüsŃą2103cŐ3Ď’“–VŁďąź° IDAT„B!„5Ŕˇß~`Ýś ÔmÔ‚‘˙űwď&™°mĹLvţ°:^wS“o“X´ŚÄÜJsúrTčU­éţĎôÁ˙™>\=‚M‹?'čě?¬ž1–±s”Ô%D‡“š|K[u˘ÚřŇĘÎk{§JőőÎí8,mĘ~@Ór~ŐćźćV¶Ř9)§ß§ÝIĐaT®]î.ËPŐ·°±×8’âŁIIŚÇĘÎ K۲űĄs+[l‹Â„Š\ż2Čń .RąF~φĄę˝2TT×őíţ ŤMĘŐ§{ďKućîîN@@ĄüűI·`Á‚r÷¤6őëÓ¨Oŕîžpwť|Pb 3§Š}oTĄ-OOâ.—|+™•‡r–VZlĚëóťđpsiË÷ëGç€É´xăuuq'<{¬ęzwérŤľďf!„BÔ{6-ŕť)‹q­×P]~ýŇięxßť‰ š• ŁŁąß»ęMMÚtÖzž-žb̬uŚű?_.źţ»X›%—R¨~űo0R‘ľ™š“ž’tßë×v~ŐL ŹbeƦ¤§$‘—›®žćŹşţQľ’´UçŢőď]Tt] +7+Ł"ׯ 3”łRnÇ„sńßDܸ CĆiěᡧo \Ł Ü}*Ϭ’ęhĘ{R +zÍ›‡Žľ>7öď×ŘźB·hiYz\|‰z­‡ż …… P Ł§GÁ=oRčęjě7Q•¶šĽô’Ö0Ăë9ĺFĂĹżVŃótűß4\ýýŮóńÇ%‰kűöŃ9`˛Ć’•'±÷ńÁ÷Í·Ř7qb‰sÔi׎Nź|Ěő?÷srůňj}ďĺm&B!„5ŔťŰ±Ř»Ü}ŕ »vIN¸yú{˝¨ţďđ˙©˙;7'›“ýŽ©ą­:?Ŕ–eÓřt`[őŚ €Â˘˝,­ďţ^ôvŚâË7Tˇ‰GĂ•î«[}ĺÇE†j.c_hÎş9ĘuţzĹ‚„Ćţ4^S Ę OOţµWĎFřuęĄQżî=ýW_WćĺYJľÉ¤"×`bf•ť#IńŃěÝ´cSsz핡RżI«˘v4V.˙‹IŻu`űÚ÷˝/B‚ŚÚBßÔǦMi?v,/Ż˙ 7Wsxú Ťă’n)g6´= #+Kt phŇ„žsľĆŔÔŚ”Hĺ 6Ď®]ĐŃ×îîcQż[Wtôô0¶±©t[yů¸¶ö§ýرX׫‡ž‘!ćÎδxăuZĽöÁ»vWˇĎ Lěěč>}:;``f†BWsggÚ} |‹Éíëw7SľĽmyYYÔďÖ•n˙›†eť:ččéabgK“ýynÖL,ÝÝ103«öź™™!„BQ4ômÇ…ăqđ×u<Űo0WĎź !:]]=ňór5ŽUýfľM—řińĽ7u9FĆl[1¤řh†MZp÷ť……ÄE†˛ińĽ°]==~^ţ?€Ô)‰ńde¤cdbŞž-qď †Šôőů7Gř;Ö.ŕ­ńłH gőĚŹHOI¦eÇĺ<˙Ý aŕČĎąřď~Z2K{\ę5 2$5łĆcdbĘČi+Ń)z#€ş~Łć÷ŚźöëŇf”Ü”´"ׯâěáÍ•3Gůďôßôň‘ć›U€~ďL řÜq6,ŕíÉ qp­ËĺÓGX=ă#r˛2q×yĘß!AFMđţéSe~=ňÔ)öMšLvjŞFůůőčöżi48¦ŞËÓbbůeŘ0Ú~0 77şOźŔr˙ÖÄ]ţ—V~Ę2e1Ëý[W¸­mŰ…#™™5›^óçÓâÍ7Jô=hÇŤ}>*zžő}_Ŕ­Mk,\]é˝pa‰öółł9ľh±úĎ©ŃŃú%Ýţ7 ďž=ńîŮłDťř  N._&a†B!„¨şÁć°vö'ü¶jű6ŻäŮ~Cč3h 9Y™ŢľžŻÇ `čÄyx7oCČ•@úŹŕÜŃ?5ę˙HIJŔÉ˝>ďM]NŰîýÔ퀡‰)Ç˙ŘĆ'Ú`lj†cOŢ™˛˝îţ ݱ÷+ś>´c{&ěú|±jo©Ë*Ň×fm»0vÎz~^ţ?ĆľĐcSsęůřňNŔbőŢ÷;ń ÁÎą“—ď`ŰĘ™|ýáËädebăŕLóvÝč;x¬ĆĄő?´Ë4Ôm T*rý*.u•a†±©9Ď˝ň^‰óÔoÚŠ€ow°}Í|fĐź¬ôT̬lđíĐťžŻ˝ŻžáRÖu=L9éɸşşĘ7«]^V6·9ž«{ö~ü¸Öă®íÝ‹‘ĄÍ^ys2nqň§V¬$=.ŽS+Vb]ݶőë«ß‚rxć ž ŔŢLJ‚Ľ|’CC+Ő–nŃŁr33 űçvŚI«·ßĆާz††$‡…qĺ÷ßą´ĺç*ő95:š-ŻżAóW_Ąnç§1sp@×Ŕ€ô„˘Îś!đ‡I Ń8ÇŤýűIşyß·ŢÄŐß[[ ňňH ĺúű¸°y3ąąŐţs (,,,”ż„BÜkÎś9\ż~Ű18ź9 "Ş­$Ϧ¤x)$W¬XńČÎűŢ{ĘN×ćĎŕ޲‹ÜQ%_ŹŔ•3Gyůý)ô~stŤë˙™­óÉIO¦yóćŚ5JnčCúűĆ·]OZučýP‚Ś   őFŁžÁÁX¤ĄËŔ‹Ç.ÚىX­˙ĆËž˙ßŢ˝‡GUßy˙r™K 3B’$$BA,$´h$´*›¸ÖRm5éÚÖj­ÝÖô¶ ­»­O-Ö^žŞ­­5 ¶kh‹+ÖF-I´JRA !PČ…‹$Če&Âţf$d€0ąÍ$ď×?2g~çĚ9ß3óńűű`í~ű ˝˙Îßź0WźČůRŔť˙‰Cű=ťv»ť: ĆjGp.¦™# ««Sµ{ßÓ?ĽWćHÝýĂ'P×ĐéjÓl–$ ÂŚa@ô ĚFŔÝňq?vXÉöT}îľir쌀:˙NW›ö•<'牞§·dggËl6sc‡Đńcőzďť­€3€ń`aYŔžű‰Cűuŕ›=AFJJ ?އAMő‡Ź]&ČŔXG0§ŽPmĹ 1ŁńŕnO!őąąąfd„Ň|ô€šŹ s –/_®¬¬,Š1Ś2€„>ŤŤU}}=…Ŕ“ ‡ĂˇÔÔTÖČ&éééÚ˛e Ap ŔEuNŇ‘EË)üV—qd~L­]»–â999ĘÉɡŔ93u&$L퓦Př  €W«WŻÖłĎ>K!0ŇŇŇ(caŔ+›Í¦üü| żD @ !Ě…0 P3@@ái&\¦ššŽŘçgggËfłq#€A°}űv•––jůňĺr8a—©¨¨H{÷î±ĎőŐW•——ÇŤhűöíÚ°a$iďŢ˝şí¶Ű´téR €0€ËÔÖÖ&I 5hRtě°}îńcőęěp©ˇˇ› ĐąA†›ű5ř? 
|4):V˙ö™{‡íó^üĂĎu´~…čÜ #4Ě ´Y*ůŰ ęhwh@€`PŚçYźý˛ćŘ+ëł_VhARO‡ĆöíŰ)ř1Â Ś Ţ‚Śč8IRtL Śz 2Ü4 pf`TëOáF 0ŁÖĺnŕ˙30*ůd¸h€#ĚŔ¨3 ĂŤ@üaF•Á2Ü4Ŕ?f`ÔĚ ĂŤ@üaF…ˇ2Ü4Ŕżf ŕ eáF ţ0m8‚ 7 đ„XĂd¸hŔČ#Ě@@‰ ĂŤ@FaÎHn0r‚)`¬ÚłgŹOű9ťÎ=o§Óéóą'%%qăđü!Čps/<ý:Ú]žóZşt)7 †a`L*..ÖĆŤňÜëëëőđĂű´ďŞU«”‘‘ÁËź‚ 7 ~L3ŔG–ɱ÷y‹…‡€ĺŹA†SN`xŃ™“222d±XTPP —ËĺŮnKŻąŽ«/ąÁ(ë”řa=çÔĺźÖěy‹Őîşô4—ÝĺŰTS˝Óó:**JąąąJNNćć# ťdHR¨Á¨í{ţ‚ă#&NŇŇY2ĚvÇ×éÝ·ŢPóÉăj0ŞŁ˝çď” 6Čd2Éápp`fĆ,‡ĂˇÄÄDެ¬L’TS˝S.§–]w«""ýŻ‹áRJó‰F˝ţň3:RWíŮ–žž®ĚĚL™Ífn:VaaaŻ×-'›Ôr˛é‚ăkż$iůŤź´sřÇëŐÁ˝»/kźââb „€1Íl6+//O©©©*((PSS“ŽÔUkÓ†µ0ízÍ[”0ײëí-ÚQ˛Yť=˙Wn Ś&™™™*//ď×Řşş:ą\.ť:Ń8¨çŕîŠ2 Š‹ëßô–´´4n  $%''kÍš5***Ň–-[ÔŮáŇ›[7é`őNżíŇpk8Z«×7?٦†Cžmtc`´ÉČČč÷âµëÖ­SuuőťK\\śňóóą)0‚38Ël6+''Gv»˝W—Fáoď—#íz-L»ŢďÎyGÉf•—löĽŽŤŤUvv6Ý`T#Ěŕ<î.ŤW_}UůË_$Iĺ%›u ęźZvý­Ăľđ§7Ţş1V®\©¬¬,n ő3đÂl6+++ËÓĄQ__ݦ†Czţ©u#ÚĄár¶jwůë}ş1rssełŮ¸q`L Ěŕ"l6›Ö®]«^xˇO—ĆâôO)vzâ°ťKýÁ*ýýŻżWË© I7‹3čo]/oüĄć.ĽZŽÔëd0ÝB›.g«ĘK_ÖîŰ<Ű”——'«ŐĘÍcaýäîŇ(..VQQ‘\.—vďئšęťúŘ'o’.Ťó»1 233űýT€Ń0€Ë”‘‘!»Ý®'ź|RŐŐŐj9u|Đ»4\ÎV˝ąe“ŞwżĺŮF7@ |`µZ•źź?$]öţSŻo~Fť.ItcśŹ0€¸P—†-aľ>ţÉ[.«KĂĺlŐý˝jŞwz¶Ą¤¤(77Wfł™bśEŔą»4ĘËËUPP —ËĄšęť*¬ą_Ë®żU3f/¸ä1ĽucäććĘápP`€óf0H‡UPP wß}Wť.˝úüíŇh>Ѩ˛­¦ŕ2f0ĚfłîľűnŻ]K®˝Y‰ó{Ćîz{‹v”lötcDEEiőęŐtc\aCŔ[—Ć/?Ł˝»Ţ”=ő:U”ľ¬#uŐžńéééĘĚ̤ 3"î.ŤĘĘJ¨©©IGęŞőňĆ_zĆDEE)77WÉÉÉ  ź‚(C+99YkÖ¬QzzzŻíéééZłf AŔe˘3€a`6›•““#»Ý®ŠŠ ŮívB f0Ś’““ 1i&  f€€B a(„0Ć™L&Ďź»Çóśř§±ç{Űç= ăl6› $édd$_h‰—$Y,–>ﹼ*//WaaˇŽ?N1®˝öZ­^˝šB€Źěv»ĘĘĘÔdµ(údr:) FĚá©ęî‰,ŇŇŇúĽOgŔ«ââb‚ ”×^{Ť"Ŕdffzţ\3ݦ® ~.bd´Ť::mš¤ž)&‡ŁĎ:35ľ­EćCű)ü–+j˛:,S) ŐjŐŞU«´qăFąĚfU'ÎVBŐ^wwS ›Sáf5K’d0”››ëuaŕ˘Ć;[µ…€ßjşba ’ŚŚ ŐÖÖŞ¬¬L.łY»çĎÓŚ}ű4ˇĄ•â`ȉ‰Ń‘i1ž×ŮŮٲŮl^Çf<ňňňd4µeËukR’¬GŹjęˇĂti`H´ŤŞŹ‹Së„Ivdx›^âFč%''GIII*((ËĺRĂ”):n±hňÇd=z”P˘=4TGb¦ŞÉjől‹ŤŤUnnî;2Ü3}8ĹÇÇëÉ'źTuuµşudZŚ>M¨ńbHŇĘ•+•••ŐŻcfĽ˛Z­ĘĎĎWeeĄŠŠŠú„OśPôÇxŚ+úĄqŇ$·X<ÓIÜ–,Y˘ĚĚLYĎ 7.†0pQÉÉÉJNNîj4Y­j˛ZehmŐ¤Ćăšxň¤Â::(<ÚŚF·ô„ÝÁ˝#_B 7 @żśj”––ެ¬L’ä2›uČlÖ!ĹlŔ`śŚŚTgXXŻ÷ RSS•‘‘áSáF¸,îP#;;[ĄĄĄ*..VSS“¤ľÁFxK‹Â››ŢÜÂŁT{h¨ZÂĂŐ®“‘‘}:0$)%%Ev»]K—.”Ď$ĚřÄl6+##CŞ©©QEE…***T__/©'Řp™Íj2Ągü©fE´´ČÜÜ,c[áF€r‡N“Q-áár™Í^Çą »Ý.óĆřŠ00`6›M6›MYYYjhhPEE…ĘËËU]]íÓ:!âěâŹ1’$Ck«ŚN§ŚmN…·´°¨ę ’Ód’ÓhTKD„Z"½v^H=SHěv»‡$Ŕ8a`PY­VOdž$UVVŞ˘˘BUUUž® éĂÎŤ¦sź 8Â::ennVhGko “öĐP9ŤąLfµŤršŚ}ÖĽ8_BB‚‡ełŮ†í\ 3CʽƆ[eeĄŞŞŞT[[«ŞŞ*ą\.Ď{GOGPW—ŚmN…v´+¬ŁS!íí íč`ŞŠÜťˇˇę Ssx¸NŹşŕT‘sEEE)))IńńńĂ^śŹ00¬Î7T[[«ÚÚZíŮłGŤŤŤžE%©;8¸gŠŠ"ĽĎ|ŞY’dt¶)řt·'ěúô›şŇ.Iž°˘=4Dˇaę ˝d—Ĺąbcc/‹Ĺ˘ÄÄDĹÇÇé´‘ËEQV«UV«U‡Ł×öĘĘJŐŐŐyÂŽóC·žu8>üŻ7!íí mď™®âîđp377÷ëî 7w8áÖf4ęôřń’$§ÉxÁu,.%!!A&“É\X,–^A“ż"ĚcBkk«ęęę(ÄŕdA`Ô8żĂ­¦¦FmmmŞ««S[[›öěŮ#IŞ««ë5eĺ\ťaaž0 o‡GLżÎçÜ@dĐţÎ@ŃQQQ˛X,˛Z­˛X,2™LŠ‹‹óHŠ0Ś ………*++Ł0 ¸×j¸PACC$IUUU’¤¶¶6ŐÖÖöNgŻ…HűëÜ@d¤ĹĆĆĘh4J’’’’$ÉTH ř°âR3ŁÂ¸  ÍÍü´’>™©i)WĘd±JăĆ©őŘQŞxG;7ýAŐŻ˝<¨źůÝő´¤>03â˘ŰŕÜ˙¨ĹŘqEň|ŠŚQçţďĎ” w§‡[ccŁ˝Žuw ÷ôoâăă=a…űµ?­Y1Ň3Ď2+I7?ö´¬łűţcebÜtMŚ›®9˙ö)ŘľU›ľr»ś'ŽŹĘ:,şýKúÄ÷"HąÔ÷erŚŇVÜH!Fą ‘“4!ĘB!ôËH>•ľ!Ě´¨łtűź‹e©S‡ęTöřĎ´oëßtęH˝‚‚ĆkĘÜ-Ę˝Ssn¸I3–^Łśő›´ţÓęîěuµ:ßÁ˘BÂ Š›9›BŔ‚( ýűĎ'Ă„H(٦Ç?q•Ţ^˙k5ÜŻÓííęt¶©îť2=wO®ž˙ÚčĚéÓŠIY¨ĹwÜ3*k3Ź0Ś tfÖ¬«W(&eˇZŽiÓÝ·©Łµĺ‚cwýQÓ식8WMö÷yĆŇkôŃĽ»4Í~• &ĘŐ|RGvUhÇ3O¨ę•|®ÓWiÉ÷(îŞ4™"'Éyň„•żĄ7÷jĘ޸ŕ~ÓS—食żŰs^§×içź~ŻŇǦÓíí’¤Ô/]é߼߳Ź{ÝŽçľš«ÝEňéúÂ&LÔ7ţY§†˝•úÍőK´bÍšłňS 5™´î#Sůň |‘Ľň&IŇŽg~ŰŻu0Š˙÷Ű^·/ů⽺ö;?čµÍ4ÉŞ+–ečŠe*yô!m]wżĎç™ňé[µňGŹhÜŮgÁK’Ů­Ů+VjvĆ úë÷ďÓ;ďłßUźżK+ţűGҸqžmQӯвŻ˙·W¬ÔúU+<ĆĹřr}]®žG[†ŤJýâ×´čö/ń…~i&€€ë¸J’´oë+>cňśy=] gΨôW?ŐŻ3®ÔŹ“Łőč˛ůÚşî~ťéîVÚ—żˇig?ërEÚfęúţ\’TňčCzěšz0ŮŞG>>O[×ÝŻî®.­Xó &]Ń{ ëěde|÷uwuéĺ5_×OÓőă9“őÔęëÔ¸oʦÎwčc_ů¦$©ô±‡{-úůŔĚ=03B»‹ţäóőť>»¦HѬ…ź˝C/}ç­›;…® @Ŕ@DL‰‘$5î«ňů oůĽĆŤŻŠg×kËkŐ¸ŻJ]í.ť¨= ’GŇ;OýF7N V}Χă/şí‹¦m˙@[×ÝďYĎădÝA•<úŢřĹŹ,Çgňzíwĺgż qăǫ䱟hÇÓż‘óÄquąśŞ}k»žűjž:ťmŠż*mč®ďĚI’ÉbUŐ+/Şâ˙ ÔélSg[+_<@€ŻBŚ=ĎZďŔě¸E=@ų뽾żsÓzĆ]ąÄ§ăOO»şç8ţ˝×÷w=÷lϸĹëµÝvöőű/męłĎŃÝ;µnî=ťsý°\ß® ů˛żÂš€€ŐŢrJĆČI2L”ł©Ń§cDĆő×h¨Ż`$Đ™Xď˝°QRĎŁO#m3/9>vábÝńR©ćÝ”ăŮv˛®F’={Ž×}¬ É’¤gÇ]®¦ű%I–óžVr)'jöě7+i@5ęë 
[binary PNG image data omitted]
ceilometer-6.0.0/doc/source/measurements.rst0000664000567000056710000000212212701406223022311 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 New Dream Network (DreamHost)

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. _measurements:

==============
 Measurements
==============

Existing meters
===============

For the list of existing meters see the tables under the
`Measurements page`_ of Ceilometer in the Cloud Administrator Guide.

.. _Measurements page: http://docs.openstack.org/admin-guide-cloud/telemetry-measurements.html

Adding new meters
=================

If you would like to add new meters please check the :ref:`add_new_meters`
page in the Contributing section.
ceilometer-6.0.0/doc/source/glossary.rst0000664000567000056710000001150412701406223021446 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 New Dream Network (DreamHost)
      Copyright 2013 eNovance

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

==========
 Glossary
==========

.. glossary::

   agent
     Software service running on the OpenStack infrastructure measuring
     usage and sending the results to the :term:`collector`.

   API server
     HTTP REST API service for ceilometer.

   billing
     Billing is the process to assemble bill line items into a single
     per customer bill, emitting the bill to start the payment collection.

   bus listener agent
     Bus listener agent which takes events generated on the Oslo
     notification bus and transforms them into Ceilometer samples. This
     is the preferred method of data collection.

   ceilometer
     From Wikipedia [#]_:

       A ceilometer is a device that uses a laser or other light source
       to determine the height of a cloud base.

   polling agent
     Software service running either on a central management node within
     the OpenStack infrastructure or compute node measuring usage and
     sending the results to the :term:`collector`.
   collector
     Software service running on the OpenStack infrastructure monitoring
     notifications from other OpenStack components and samples from the
     ceilometer agent and recording the results in the database.

   notification agent
     The different OpenStack services emit several notifications about the
     various types of events. The notification agent consumes them from
     respective queues and filters them by the event_type.

   data store
     Storage system for recording data collected by ceilometer.

   meter
     The measurements tracked for a resource. For example, an instance has
     a number of meters, such as duration of instance, CPU time used,
     number of disk io requests, etc.
     Three types of meters are defined in ceilometer:

     * Cumulative: Increasing over time (e.g. disk I/O)
     * Gauge: Discrete items (e.g. floating IPs, image uploads) and
       fluctuating values (e.g. number of Swift objects)
     * Delta: Incremental change to a counter over time (e.g. bandwidth
       delta)

   metering
     Metering is the process of collecting information about what, who,
     when and how much regarding anything that can be billed. The result of
     this is a collection of "tickets" (a.k.a. samples) which are ready to
     be processed in any way you want.

   notification
     A message sent via an external OpenStack system (e.g. Nova, Glance,
     etc.) using the Oslo notification mechanism [#]_. These notifications
     are usually sent to and received by Ceilometer through the notifier
     RPC driver.

   non-repudiable
     From Wikipedia [#]_:

       Non-repudiation refers to a state of affairs where the purported
       maker of a statement will not be able to successfully challenge the
       validity of the statement or contract. The term is often seen in a
       legal setting wherein the authenticity of a signature is being
       challenged. In such an instance, the authenticity is being
       "repudiated".

   project
     The OpenStack tenant or project.

   polling agents
     The polling agent is collecting measurements by polling some API or
     other tool at a regular interval.

   push agents
     The push agent is the only solution to fetch data within projects
     that do not expose the required data in a remotely usable way. This is
     not the preferred method as it makes deployment a bit more complex,
     having to add a component to each of the nodes that need to be
     monitored.

   rating
     Rating is the process of analysing a series of tickets, according to
     business rules defined by marketing, in order to transform them into
     bill line items with a currency value.

   resource
     The OpenStack entity being metered (e.g. instance, volume, image,
     etc).

   sample
     Data sample for a particular meter.

   source
     The origin of metering data. This field is set to "openstack" by
     default. It can be configured to a different value using the
     sample_source field in the ceilometer.conf file.

   user
     An OpenStack user.

.. [#] http://en.wikipedia.org/wiki/Ceilometer
.. [#] https://git.openstack.org/cgit/openstack/ceilometer/tree/ceilometer/openstack/common/notifier
.. [#] http://en.wikipedia.org/wiki/Non-repudiation
ceilometer-6.0.0/doc/source/new_meters.rst0000664000567000056710000001053312701406223021754 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 New Dream Network (DreamHost)

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License.
      You may obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. _add_new_meters:

==================
 New measurements
==================

Ceilometer is designed to collect measurements from OpenStack services and
from other external components. If you would like to add new meters to the
currently existing ones, you need to follow the guidelines given in this
section.

.. _meter_types:

Types
=====

Three types of meters are defined in Ceilometer:

.. index::
   double: meter; cumulative
   double: meter; gauge
   double: meter; delta

========== ==============================================================================
Type       Definition
========== ==============================================================================
Cumulative Increasing over time (instance hours)
Gauge      Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O)
Delta      Changing over time (bandwidth)
========== ==============================================================================

When you are about to add a new meter, choose the applicable type from the
list above.

Units
=====

1. Whenever a volume is to be measured, SI approved units and their
   approved symbols or abbreviations should be used. Information units
   should be expressed in bits ('b') or bytes ('B').

2. For a given meter, the units should NEVER, EVER be changed.

3. When the measurement does not represent a volume, the unit description
   should always describe WHAT is measured (i.e. apples, disk, routers,
   floating IPs, etc.).

4. When creating a new meter, if another meter exists measuring something
   similar, the same units and precision should be used.

5. Meters and samples should always document their units in Ceilometer
   (API and Documentation) and new sampling code should not be merged
   without the appropriate documentation.

============ ======== ============== =======================
Dimension    Unit     Abbreviations  Note
============ ======== ============== =======================
None         N/A                     Dimension-less variable
Volume       byte     B
Time         seconds  s
============ ======== ============== =======================

Meters
======

Naming convention
-----------------

If you plan on adding meters, please follow the convention below:

1. Always use '.' as separator and go from least to most discriminant word.
   For example, do not use ephemeral_disk_size but disk.ephemeral.size.

2. When a part of the name is a variable, it should always be at the end
   and start with a ':'. For example, do not use ``<type>.image`` but
   ``image:<type>``, where type is your variable name.

3. If you have any hesitation, come and ask in #openstack-ceilometer.

Meter definitions
-----------------

Meter definitions are stored by default in a separate configuration file,
called :file:`ceilometer/meter/data/meter.yaml`. This is essentially a
replacement for the prior approach of writing notification handlers to
consume specific topics. A detailed description of how to use meter
definitions is illustrated in the `admin_guide`_.

.. _admin_guide: http://docs.openstack.org/admin-guide-cloud/telemetry-data-collection.html#meter-definitions
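To make the definition format concrete, here is a minimal sketch of a
single ``meter.yaml`` entry. The event type and the JSONPath payload
fields (``$.payload.size`` and friends) are modelled on a typical image
upload notification and are assumptions for illustration, not a verbatim
copy of the shipped file::

    ---
    metric:
      - name: 'image.size'
        event_type: 'image.upload'
        type: 'gauge'
        unit: 'B'
        volume: $.payload.size
        resource_id: $.payload.id
        project_id: $.payload.owner

Each field maps onto a Sample attribute: *type* must be one of the three
meter types defined above, *unit* should follow the unit rules above, and
*volume*, *resource_id* and *project_id* are extracted from the
notification payload with JSONPath expressions.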
Non-metric meters and events
----------------------------

Ceilometer supports collecting notifications as events. It is highly
recommended to use events to capture whether something happened in the
system, as opposed to defining meters whose volume is constantly '1'.
Events enable better representation and querying of metadata than the
statistical aggregations required for Samples. When the event support is
turned on for Ceilometer, event type meters are collected into the event
database too, which can lead to the duplication of a huge amount of data.

In order to learn more about events see the :ref:`events` section.
ceilometer-6.0.0/doc/source/_templates/0000775000567000056710000000000012701406364021213 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/_templates/.placeholder0000664000567000056710000000000012701406223023456 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/configuration.rst0000664000567000056710000002314612701406223022457 0ustar jenkinsjenkins00000000000000..
      Copyright 2012 New Dream Network, LLC (DreamHost)

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

=======================
 Configuration Options
=======================

For the list and description of configuration options that can be set for
Ceilometer in order to set up the services please see the
`Telemetry section <http://docs.openstack.org/mitaka/config-reference/telemetry.html>`_
in the OpenStack Manuals Configuration Reference.

HBase
===================

This storage implementation uses Thrift HBase interface. The default Thrift
connection settings should be changed to support using ConnectionPool in
HBase. To ensure proper configuration, please add the following lines to
the `hbase-site.xml` configuration file::

  <property>
    <name>hbase.thrift.minWorkerThreads</name>
    <value>200</value>
  </property>

For pure development purposes, you can use HBase from Apache_ or some other
vendor like Cloudera or Hortonworks. To verify your installation, you can
use the `list` command in `HBase shell`, to list the tables in your HBase
server, as follows::

    $ ${HBASE_HOME}/bin/hbase shell

    hbase> list

.. note::
  This driver has been tested against HBase 0.94.2/CDH 4.2.0,
  HBase 0.94.4/HDP 1.2, HBase 0.94.18/Apache, HBase 0.94.5/Apache,
  HBase 0.96.2/Apache and HBase 0.98.0/Apache.
  Versions earlier than 0.92.1 are not supported due to feature
  incompatibility.

To find out more about supported storage backends please take a look at
the :doc:`install/manual/` guide.

.. note::
  If you are changing the configuration on the fly to use HBase as a
  storage backend, you will need to restart the Ceilometer services that
  use the database, i.e. the collector and API services, to allow the
  changes to take effect.

.. _Apache: https://hbase.apache.org/book/quickstart.html

Sample Configuration file
=========================

The sample configuration file for Ceilometer, named
etc/ceilometer/ceilometer.conf.sample, was removed from version control
after the Icehouse release. For more details, please read the file
etc/ceilometer/README-ceilometer.conf.txt. You can generate this sample
configuration file by running ``tox -e genconfig``.
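Once a configuration file is in place, the HBase backend described above is
selected through the database connection URL. The following is a minimal
sketch of the relevant ``ceilometer.conf`` section, in which the host name
is a placeholder and 9090 is the customary HBase Thrift port::

    [database]
    connection = hbase://hbase-thrift.example.com:9090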
.. note::
  tox version 1.7.0 and 1.7.1 have a `backward compatibility issue`_ with
  OpenStack projects. If you meet the "tox.ConfigError: ConfigError:
  substitution key 'posargs' not found" problem, run
  ``sudo pip install -U "tox>=1.6.1,!=1.7.0,!=1.7.1"`` to get a proper
  version, then try ``tox -e genconfig`` again.

.. _`backward compatibility issue`: https://bitbucket.org/hpk42/tox/issue/150/posargs-configerror

.. _Pipeline-Configuration:

Pipelines
=========

Pipelines describe a coupling between sources of samples and the
corresponding sinks for transformation and publication of the samples.

A source is a producer of samples, in effect a set of pollsters and/or
notification handlers emitting samples for a set of matching meters. See
:doc:`plugins` for details on how to write and plug in your plugins.

Each source configuration encapsulates meter name matching, polling
interval determination, optional resource enumeration or discovery, and
mapping to one or more sinks for publication.

A sink on the other hand is a consumer of samples, providing logic for the
transformation and publication of samples emitted from related sources.
Each sink configuration is concerned `only` with the transformation rules
and publication conduits for samples.

In effect, a sink describes a chain of handlers. The chain starts with
zero or more transformers and ends with one or more publishers. The first
transformer in the chain is passed samples from the corresponding source,
takes some action such as deriving rate of change, performing unit
conversion, or aggregating, before passing the modified sample to the next
step.

The chains end with one or more publishers. This component makes it
possible to persist the data into storage through the message bus or to
send it to one or more external consumers. One chain can contain multiple
publishers, see the :ref:`multi-publisher` section.

Pipeline configuration
----------------------

Pipeline configuration is stored by default in a separate configuration
file, called pipeline.yaml, next to the ceilometer.conf file. The pipeline
configuration file can be set in the *pipeline_cfg_file* parameter in
ceilometer.conf. Multiple chains can be defined in one configuration file.
The chain definition looks like the following::

    ---
    sources:
      - name: 'source name'
        interval: 'how often should the samples be injected into the pipeline'
        meters:
          - 'meter filter'
        resources:
          - 'list of resource URLs'
        discovery:
          - 'list of discoverers'
        sinks:
          - 'sink name'
    sinks:
      - name: 'sink name'
        transformers: 'definition of transformers'
        publishers:
          - 'list of publishers'

The *name* parameter of a source is unrelated to anything else; nothing
references a source by name, and a source's name does not have to match
anything.

The *interval* parameter in the sources section should be defined in
seconds. It determines the cadence of sample injection into the pipeline,
where samples are produced under the direct control of an agent, i.e. via
a polling cycle, as opposed to incoming notifications.

There are several ways to define the list of meters for a pipeline source.
The list of valid meters can be found in the :ref:`measurements` section.
It is possible to define all the meters, or just included or excluded
meters, with which a source should operate (a short sketch combining these
syntaxes follows the list):

* To include all meters, use the '*' wildcard symbol.
* To define the list of meters, use either of the following:

  * To define the list of included meters, use the 'meter_name' syntax.
  * To define the list of excluded meters, use the '!meter_name' syntax.
  * For meters which identify a complex Sample field, use the wildcard
    symbol to select all, e.g. for "disk.read.bytes", use "disk.\*".
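As an illustrative sketch (the source and sink names here are placeholders,
not shipped defaults), a source that polls every ten minutes and matches
every meter except ``disk.ephemeral.size`` combines the wildcard with an
exclusion::

    sources:
      - name: example_source
        interval: 600
        meters:
          - "*"
          - "!disk.ephemeral.size"
        sinks:
          - example_sink

This corresponds to the wildcard-with-excluded-meters combination
enumerated next.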
for "disk.read.bytes", use "disk.\*" The above definition methods can be used in the following combinations: * Only the wildcard symbol * The list of included meters * The list of excluded meters * Wildcard symbol with the list of excluded meters .. note:: At least one of the above variations should be included in the meters section. Included and excluded meters cannot co-exist in the same pipeline. Wildcard and included meters cannot co-exist in the same pipeline definition section. A given polling plugin is invoked according to each source section whose *meters* parameter matches the plugin's meter name. That is, the matching source sections are combined by union, not intersection, of the prescribed time series. The optional *resources* section of a pipeline source allows a list of static resource URLs to be configured. An amalgamated list of all statically configured resources for a set of pipeline sources with a common interval is passed to individual pollsters matching those pipelines. The optional *discovery* section of a pipeline source contains the list of discoverers. These discoverers can be used to dynamically discover the resources to be polled by the pollsters defined in this pipeline. The name of the discoverers should be the same as the related names of plugins in setup.cfg. If *resources* or *discovery* section is not set, the default value would be an empty list. If both *resources* and *discovery* are set, the final resources passed to the pollsters will be the combination of the dynamic resources returned by the discoverers and the static resources defined in the *resources* section. If there are some duplications between the resources returned by the discoverers and those defined in the *resources* section, the duplication will be removed before passing those resources to the pollsters. There are three ways a pollster can get a list of resources to poll, as the following in descending order of precedence: 1. From the per-pipeline configured discovery and/or static resources. 2. From the per-pollster default discovery. 3. From the per-agent default discovery. The *transformers* section of a pipeline sink provides the possibility to add a list of transformer definitions. The names of the transformers should be the same as the names of the related extensions in setup.cfg. For a more detailed description, please see the `transformers`_ section of the Administrator Guide of Ceilometer. .. _transformers: http://docs.openstack.org/admin-guide-cloud/telemetry-data-collection.html#transformers The *publishers* section contains the list of publishers, where the samples data should be sent after the possible transformations. The names of the publishers should be the same as the related names of the plugins in setup.cfg. The default configuration can be found in `pipeline.yaml`_. .. _pipeline.yaml: https://git.openstack.org/cgit/openstack/ceilometer/tree/etc/ceilometer/pipeline.yaml Publishers ++++++++++ For more information about publishers see the `publishers`_ section of the Administrator Guide of Ceilometer. .. _publishers: http://docs.openstack.org/admin-guide-cloud/telemetry-data-retrieval.html#publishers ceilometer-6.0.0/doc/source/2-2-collection-poll.png0000664000567000056710000010021712701406223023154 0ustar jenkinsjenkins00000000000000‰PNG  IHDR/„ć4ĎębKGD˙˙˙ ˝§“ pHYs  šśtIMEß ! 
ceilometer-6.0.0/doc/source/2-2-collection-poll.png0000664000567000056710000010021712701406223023154 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
ceilometer-6.0.0/doc/source/5-multi-publish.png0000664000567000056710000012167312701406223022524 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
ceilometer-6.0.0/doc/source/3-Pipeline.png0000664000567000056710000010557612701406223021502 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
˛ľnrÜËçňÓŢwę“kÎř±Uńě÷ĵ üÎßYµ8?¨LÁ‚bćÍ®Ë÷7Éăă•Ú˛-—źt,^xY|gŐ₍—ĚŠgľ%C-Y]®Üv.•ŚĹ“ńnOßîFÝhKŽsąmÍ„±ýŻnK^çÉëaŢěş‚C’súýys{ňűs6Ě›Źł©lÖ; _¶fS~|•ŚóŇc±űoů\L9«¦ –Ęő}•š:ib<¶jqľLVţ§ewÇ?ůÇqѧoŤek6EË–íš×ë3‹ý~ßYµ¸ ŕ+5ŻŰµoAź™nç¤ýç^PßYµ¸ /M®é4íˇ–Ł[Ő$×äëΛ]Źł©ěŤĹôß/™?$ßż÷üˇÜ€^rŇ؆R˛ lĘY5%÷[IřS.x)v*rzUÉŚéµE;¬tç±űµÎ˘_»Ôž›óf×ĺ/ôĺV´ µtTęQ‘k.Î.’A-'Żt}”dĄ7~ď]+éU™×ĚźSrĎ­›®ľ<˙şKľŢHiĽdVŃOě‹í 5a\u~ĺBzuyúzłjÉÂ’Ź±¤o’¬ouŁ€Ňnľľ±`‚UééăIX0~lUÉý2'Ś«ÎߤXßÚVqÝxIOVnĺeňu{.;s{ň5’„2éşąůúƲustEKń0¤aŹ× .)ĄĆ¬éľćšů Ú“Źé]ź óf•Ýă˝\HőĐ‘ľągEÓÂ~Ż=ĄĆâ3Žě;WLOdLy"™‘Ú—±XXTęő0a\u~,u «»˘kSăÁR %Ay±ů^˛˘ďšůsŠö ={;ö¬Îź|dçP™7».Ú›ż‹^–ďłň}ŕ‘őIYl[˘ţěĚíÉ·˛Ŕ¦ż.5ľž{Ańpv׾ý±xáe=×2{n§Wń¦_ÉŤËróőRמĚ’ßĎ ’ˇ#Ľ„!’^éLŚJI˙űÖl±Á^ú}ŤóЇ|ă+ŘWŁÜ$(ąďÚ·żâ‰č±Ř™Ű“ď”ű»ŰšL&‹MJ9ą$z”šxěÚ·?§µŘŞËtÝőWŻI­čꑚčýzŻt°Ţźôď\®Ö¦NšXrĺ&ôî“óřxĄ'ž¦of•z°·†Tß^}ťü˙”łjň_·÷ ÍG&”SÎŞÉOŇuÓ0oVٶHęŞw(Zl"UN:¸\µdŕ’ }J‡µúš7_ßßYµ¸ěk÷Ç%jşu뎊®Ĺę1]ÓŤófUÔ·ďlďđ”Á }Í„^ˇ\݇ö5ÉţśĄćkĹĆZI_˛%u°LąźéÚ†‹ăą‡o‰ď¬Z<ä{&NW«—.Ś7ţçÚxvĂŠü#Ůişşăöűšă˘Oß: úHĎŃú;O˘!Uýµ_ďqjrŤ)čęŽ1cŠß$<î·O/2/X%ÜĎ8=ą¶–0pěᤰ˝ĐŢ‘|üX¤W9ö7ČK˙{±Ő‘•<ú0\Ôô# éD†Kú{´lŮž?±®ż6.v'—kćω­ĎçzÖéőzXźZąQtŁôÔŕŁÔꍂÁÓş‘«‰üĎUÁA•"Ô;„ąľźiĚ‘ß[]‡ĽĐ(+y|| §Ź'ˇ=?í,»ç\z´u[®ě©ÄéšHúö-ŰrůúO‚řąGoK«Űş-—ŻŁdwzBłűµý×[ýôÉѲeGɉ^%ŹCn}>W0ůŰú|{E§‘Ă`UŞO@_Sě5ýB®#ĽÝť?¸\@”_ýÜO˝ tŇăÄô)ČýŽ)Ű;*şľ0LăťÔ8l°Aňî}©yX?O­”ťV^&Żß=ŻuVĽoj±ůŢř äR_7ą ˝¶lËĹúÖ§óăčd_ĘR§­÷í·+?O"]ĂÇ2ŻŰú|.?ĆÝş-»_ë,ůdcúýý…ÂsgוÝ+ű®MO”}R1ů›»92t„—śôťĺ Gî¸čęĐť­rß_g4š§Ž g0:/´ďé3¨¨ýߪśě/™ËÖlŠ·Ţ>µ¶ĹęÔ 'L${Aövśl*]Qyŕm.ú7ÓÇÓřr{3‹ąÔĹ íÁEň˙ÉJ‰ąłë˘eËŽŘú|.®m¸¸ PIďw™žıŢ`›{AϤ)9uÖ ăoîÚřDÜžÚ[ş`ś<¶*¦LšXQPTz,]Uö߇ëúÂĐKŹĂűšŘóÓÎükk¤ćG˝÷k­ÄÖŚ?Ń’ě_ą|Qc\ŮtwţF`Ą}Ú@‚ş‚кȼ®żŔůŽuͱvă%Ż1őÓ'÷ Ó!r×~űůöĹ;„—śÔ晼$ťÉ@&ýĚmů Ćş[®Ó4嬣–UK¬ü,g Ź0qâI˛ZßÚ­[wÄęĄ=űÓ¤÷ęď‘đd 5Ň7ҡýh´ŰŁ+o¬p0_ĺ…FEŻ©u·|.żeŃmÄłßRňc×ĚźSQŤöžäôçÚůsň+A“kA2ÉHúů¤˙OçJ+?¶Ş`‚6~“›c .×­¸./™}úÖŘýZg,şí›:ä˛néęMűŘÍ˝ .ćή‹)gŐÄ”I=|lŮ–«xĺuń/ŰňřŇ0ofţ†Ň@Çeۦ૾ÁôaéˆŇC-Odžď¶ĹřqŐńXcŔ©“&ĆÍGĚžŐŤ•ôků»ĄÄ¶Ë_\™Ž“ rî캨ź^›ßú塖§ű„—éqíc|şč±•7fbőěÉDxÉIŢQΊekzND[˙ݶŠ;Ľt8ň‘içôąčö÷x[ú.ÝHwbýí™UF"¸H_ô“ë R7-Ľ,Ö·¶Ĺ®}űó«“’Cf¦śUSňő”®»ţŽ ńŔ(<Ž=eRMÄó=BuĆP+öřxĄź7Ôęë&Ç”łjb÷kť«\Ňű“%+0“Io˛ďq@ň(x¦”›Ą1¬ąÔĺ÷ Lá]ÝzlFÓ®}ű Ě{üަ•>ľŰ_¸9~lµľî8Ň8oVţZ»văýn?’–>X¦Ô>’/´ď)űzHćG•ě›l;2ńÔ@¶`hٲ=ĆŹ«ŽÓjŹůćŐî×:óóĎJĂát[ fÜşkßţ˛O7Ąkx OAĄOđľfţś’‡P{T?Ŕöw )¶J6=?Î5f¤9°‡“ZúĐŹô…°?w¦:Éd/Şô!?č' ¬ôŕŚáRîĐ‘¤ ŇT:9+uęsŮIfŞclîçóożŻ9®lş{P§ßqbŞŻ›ś\%Żźdđ›>‘ĽO@Ş»ôˇĺjbüŘŞ!¤ EČ1P¤Ňßůʦ»cŮšMťş ‰Ţ§Ź'Źń•z-ö·‰}óSŰăŠ/®Ś;Ö5řńϤƛ·lĎOJÓ5śIźśü,˝'ľ}T?u“|Ť†~)¨ÔĽŮuqÍ‘GŘ·lË9±”ăBú˝›Ëś6^ęĆ`RsÉ ĽR’}z×LĄcʵĎŹ)íI7ú/™UĐTz@bĎŤmůąK©…(ĺńMĎ˙*Y…›>ś­Ük'Y]śţ}*íSšźÚ˙iŮÝqůVÉŁćéĐmíĆ'*úśô „JW';¤¨żž1Ŕ•ĎéżW©ŕ˛ÜX;飋]Cşş‹.řČüaŃ­Ģ[0oBÂKNz«–,Č˙˙UMw÷`.şőŁ˙_PW0PJز-Wňëčę.¸#=űíÝqKńŽ=·'üô~ ˘żÓ¶lË•$ÁnďN~¸ęüäl}k[ÉÁĘÎÜž¸c]s4?µ=väöxá’—ěOײeG4?µ=˙+÷(OúĆņﶕ F¶lËĺkb Ź"˝đRńLwíŰź˙š#=1Hę°ÜdmíĆÇŁů©ín!}=Oź>^Ş®’kţ®}űËrM_{$¶lËĹ׿ýř€Wť$!äžoĎ÷[˝o>$“;ďo‰]ÝEřJ×Í]›ž([7•žr<°1ĘÂTݶŘĂŹăŕ:pô©ťRŹTöŚ…ź,úo7-eýwŹ<9 ]®m¸¸ ľ«ź1Ţ®îhúÚ#ţćŮ"Ý·óPËÓůNď3=PĄľ~zNŰwŢpq~ R*X\Tâ`ËŢó‡RŻ©-ŰrńPëÓńPëÓúí!$Ľä¤W_79`čęŽ+ľ¸2Ýú@´l9‚ěĚí‰ő­mQ÷y$uüŘŞ>{]Ţ|}c~’qUÓÝ}îČěĚíÉ?–LJFCóSŰcŃ­\đ·lËĹ7¬Ę˙n˝W­%wÜvíŰßçs[¶жó‘}‡Z·îČź¦š|Ť›^–o·+nXŐgµ×–mą¸ęĎľqt`ëäU &*§&#ÍůÉK“‘ĺGIJÉě=ÉIŠÇŹ­Šĺ‹*{„©ń’™G~kéSgźşaU>|xT&ÉcX;Ű;âŠ/®ě3čşkăů­4fLŻÍ?ľ •Jďo•Ü[¶ć‘X¶fSź|®lş;?ŕżéęËKÖó†Ö¶Řú|®Ďk9™€čę.yzqúŃńb˙žîŰÓuÓ{"2śu3a\uÜdB~ «;®/1ˇ‚¬HŻpľłČŤ˛d,ś®Łô*ĚúşÉůkČC­OÇ_\-[¶Ç®}űcë󹸲éî˛7×–/*‹÷ivíŰ_0/÷”#߬[q]ţzwŃgnËĎÉŇZ¶lŹ+›îŽ+›îÎ˙×­¸®ß§cÝö@A_‘Ěű’ńßÍ×7Vd§ű°»6=ѧ;ĐŐ]°ŘeńÂË ľnzüŮ»’±l2‡L÷éíĽJő}ĺ¤çťK×lŠ+ľ¸2îÚřD~N–Ôز5›˘îţ<˙s•[ÝŘräé†ô*Ĺäű$µV¬ĎLÂÁ)gŐTĽohď9iĎ5¦ď‚śdN[*ŘlĽdVţ:•<Ő·őůÜ‘ĹŰă˘OßZvUeÁßď†U}>v˶ܠćôĎž— Ă’ÉGr§¤”)gŐÄc+oěłjr¸ęxüަ¸üČ čʦ»c꤉1嬚‚“L+íd‡ËŚéµůß1é’źmüŘŞ˘ű-_řáýů IDATÔÍOm?r˛óŃĎMŢŞ% ňmا͎췷kßţ¸ü =´˙ަ7»îH€Ľ0ßů/şíXtŰ1ov]ě~­ł ÓÍv#›’Ő»ë[ŰŽĘQÁʧdĽč¶ňˇDRŻé×Ýř±UńŘŞĹŻ’ľfţĹńPk[ĽĐŢ‘Ż•©“&ĽŽ[yc~ô‘ľÖílďČ·ŐEźą-˙;ďl?şRtüŘŞ¸…ČśôéăĄÜżâş¸˛éîŘýZg¬ÝřD¬ÝřDźţ¨§žćôŮűlŢěşüžcéţ¨÷!AÉI 
Ň»_ëV–şnô®›ş?řó¨?˛fşnfLŻ–şiĽdVţwIwŹ¬ŞŻ›\đzMę%™ě'ő˝|Qcţ†ă®}…׊ŐKĆ[owÇúÖ¶˘O3Í^s/¨+8(1uŇÄxlŐâ¸rŮ]q «;–®ŮK×lŠyłëŠŽĹGň€úwmĂĹ1a\u|ţÖż)¸Ć—2~lUÜËçúý;&ăÄ‹>s[Éľf ×Ődľ÷B{GA6a\uÁëőšůsňJ–¦ű”ôç6Ě›YĐ˙Ő×MîÓ÷MWŻoţzĹ}Éş×Ų5›â­·•}R0=˙+vC.©ńťíůy]îożS'MŚĆKfĺç„ĺúĚd>=Đ•Ď×6\k7=‘go}>—_홌ß{ĎKwďŰ‘ęó[ucţď×üÔö>dNŰ{7ý÷KćűĆUGýôÚ>óÖűoůܨ|KÉ×ęë&ÇłßRđX\ú‘ę†y3ăŮ +FuEÓă÷6ĺď8ílďČ˙l×ĚźŹßŰTôw›:ibÁçĄ;ýąÔĹă÷6•=ŐqŐ’…źŰÓ!ě)čż·©`ݰôÁH3¦×Ć÷ďm˛ŚŇ‹ ę´ŇÉȵ Ç÷SŻë¤^“Ď5óçÄłß2 Ŕ<ą‰qMę1ŢŻăŃś,ÝËçbÝŠëň˝äw>ú¸ýś˛×8¨¤Ę=ľ—î+§Vާű٤ż˝żÄ׹˙–Ďő9x«ĎD-F[U9uŇÄ‚˝¶ĘŐĺý·|.[ycţăw¶wäë&YYQŞ˙Şşőř8Ç‹űoů\~őä®î|H˛ł˝#ć^Pßż·)nľľ±ěŢsIÍ5Ě›™í'×…ţš7»®`,žŚ)“ëÄŚéµńŘĘŤ)3Şń’YŃŢüŐXĽđ˛’›N9«&V-YíÍ_­hLuÍü‹cÝŠëbüŘŞ>}ÍŞ% Jö5ýŤőŇ+}“~!â芻R_·÷řłŘç~gŐâ˘s¸t›čęĐž­×6\íÍ_ŤkćĎ)ٶăÇVĺ童N*6ŻKß°ĽéęËăű÷6•ě3/ĽěĆšé9c2ŽMĆďÉ8ö¦«/Ď˙ŽľŰÖçď÷ÜĂ·ÄŞ% ~ŹÓkcŐ’ńÜĂ·Ä„±ŐĎ’ë\ţIŽ#×97G†ÖĂŁńěÇŤ\.kÖ¬‰ßúĐŘř“†˙í¤ůÝwćöÄ·t’ąsŇ{čŃÚWçöűšów¸ßyîţüĹ>éhňłĄ?o0í˛e[®ěçőľ3>ض?žÜ´îŮü˙ó›ßTŹŁ ÷ën(Vř¦żć„±U™ ÓµÔňtüŕůö2©¦ě^q?~ioDDŚWu\ŹĹůkîpĽŽeŢ8ßo¨ÚäX绕Ôřpö™ąĆ,]˝)ĆŚ‰1­ôŐéÓĆçš·f†đ€Q÷ÖŰÝů=ćÎ,®¬ÝřxţńĚJö¸†ÁHďΉ%9ô«ń’YEWI¦O‘ź+Ě á%Ł®aެ¸ýľćxëíCqĹ «bŐ’…1eRMLţPMěůig¬oiˇ›3¦×Ú·’!µkßţŘóÓÎx««;żObýt[śH/™•?ě룟ą-–/jŚÓΉńăŞű\cćÍ´Ş2C„—Śş©“&ĆŞ% cŮšMq «;>ëßý¸äĐJ[·ĺbŃmĽď¦«/Ó0'ôiá»öí/yŤi7sŔ91Ľ„—p’rVMźSá K®m¸8ćή‹µź­Ďçâ…ÔÉĐ3¦×FăĽYV\2,Ţz»»`î´|QŁŁOŕkĚëšcg{Gţ3czmÔąĆř»gŹđN˘‹´Y7uŇÄX˝tˇ†`DÝtőĺqÓŐ—k“äceĺńĺMd‘đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@&ť¦ €“É »ţ-^}ó†(ŁfÜűâüɢúý¦Ś¨GőŁË+ź!ŐýλńŐ˙öbĽůö;ŁŚŞ÷ť‹.źÓ&ťˇ1PŹęőzdííěŽuŹż¤!*đ~|J\rţ5ęQ=¨ňŘ8CęŐÎnł úĹŻâ‡íű5ęQ=˘QŹŚôëěťw5B…vĽŇ©PŹęFť•—0J:»~®@=ę‘QôUťc=GÚ›‡"žŰűž†@=ŞGČ á%Ăć·ÎڏiÎé"ĺuľw=ó+ zTʍG ˇÉ€3«"~żîT Ń«źŰ«PŹę˛Ăm “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix dŇiš€JýüżŠ˙őZWŮŹyőÍn UˇC´'¨GőHv˝úfwŚ3¦âŹE=CZ/ť]żÔkE=ŞG†Ö±ĽFÔŁz¤üśŹÂKбŠżnýg 1TíŮŮ­=QŹę‘ăŘ˙űĚŤ EĎľ´?ž}iż†PŹŚ2ŻőĂÍcăRUUĄ ĂőX]]­a`ŐÖÖĽ]SSŁQ`ťyć™Ć¬Ŕ¨łň’˛ęęęⓟüdtttTôńÝÝÝńꫯj¸ 'Ĺ˝äśÜŞ««cÎś9ęQ=’ázlhhęęęŘż`«˝ÔŁzdŕ&Nś—]vYÁűfÎś÷…ęQ=2qţŮE˙íÎŤĎFDÄg>ů;1ĺg¨G†ÝĚ™3cćĚ™ÇüuÔŁz„J/9î,üňßĹî#ťĎ?Ż»¦l‡đÂ+űăß}éôű5ëĎ›76ÔÇg.ýť>˙ö©żčůü˙ďÎ˙#ć~älPŹ Ő#”´űőń…żŢśűçź]¶FvżŃwn|®ßŻ{çĆç˘ţĽ‰qď˛ĎÍ…äó?qţŮC–€zTŹÂKŽ+-?|9?1‹Ř°ů'ýŢ=;ÚQM*Ú9íyŁ+vľĽ?®˙ë'cçË˙+}BCző¨apőřŁW""bĆąă…Wödž'RqŔ˙Ą«/,úţÝŻwņÍ?‰ť/_ţ»xćż,ŇĎ Ő#d™đ’ăĘú'’ďHîÜř\lx˛ňÉŮ÷ľü‹ľ˙ŔŰďÄźÝ˙÷±aóOâî–˘ácçYAęÔŁz„Aą»ygDDüŐç/ţÝ—ţÇ‘› V´«\ÝŢŘ8#>ţ'ŹĆî7ş˘ĺG/Ç5—ţ®Ćő'§ŤsÜ8đö;ŃzäÎŮŤ ő1ůăz:‹ľ|L_wÂŘ÷Ç}riLţŔ¸ŘđäO46¨GPŹęlëŹ_ŤÝotĹ䌋ą9;ćôÜžúŮ|ěőSŢoćWJ·üđŤ ęNÂKŽI'3˙ŁçĆ„±ďʆ#ťĎú!šLÍ=˛áňî7jlPŹ Ő# ĽŹÔ]R7 ;·ŕýÇjĆą#"â­ź˝Ł±A=ÂICxÉq#Yňźt:‹ë#"˘őGŻÄî׏}BeRęÔŁz„Á*XÝ8٧.?z^Ś˙ő÷ Éjčżq_DDLů€C@@=ÂÉCxÉqaçË˙»ßčŠńżţľhřčy=ťÄĎČßíşëČÄmĐł×ĆŹ_錰ź¨GPŹę¬ĺG/Çź˝“?0.úpĎjčžÚ<ÖŐĐëźüçŘůňţřÄG&ipPŹpŇp`Ç…»›_ž»eé“Ünlś_řëÍńđćÜ NA=đö;ń÷/ľwn|.üěť˙ëď‹Ď|ňw48¨GPŹęä-=ő¬~N×ă†Í?ÉŻ†.wPČťź-úţÖ˝’Jfś;Ńá  á¤"Ľ$óŇKţ?siáÄ©áŁçĹźýúßÇź˝ëźüç˛GuăÝý~ŻżúüďUtň¨GőęHě~ý`>ĚHöťMÔź÷›1ůăbĎ]qWóβ7îÜř\Ůďó‰ó'Ĺ#ńď58¨G8©/ÉĽô’˙ŢŹ¬%K˙7lţI<Ľ97¨»^3Îť3Îť_şúB3PŹ Ő# X˛EĂ”Ś+z’ńÔž{Ţčęw5ô—®ľ°čű'`\Ě=˙lµęNJÂK2ďáÍąŘóFWŮŐ![üjŮĄ˙ÝÍ7jLPŹ Ő# [=î~Ł«ěj­ţVCéę‹4&¨G á%™¶űő±őÇŻFDϲüR^xeĽőł_Äťź‹űţäR ęÔŁz„ŃňĂ—ó{Ă–{„ôÎŤĎĆţqß WCęNVÂK2-Yň?ă܉ń˝/˙DzťĎťź‹Ö˝Ţ~§ŕĐ@=‚zTŹ0\’S‹>z^ź-ŇŢúY}üŕ÷ő»PŹ@ˇS4YVę ‚Ţ’PüěťhůŃËÔ#¨GőĂn÷ëóőŘđ±sË~lĂÇ΋ńżţľč˙ @=G /ɬ–ľ»ßč*|•2ĺgÄü#'É}ŁĺŤęÔŁz„áŻÇ#AÉ䌋†Źť×ďÇ'5›¬†Ô#Đ?á%™•,ůź˙Ńs+zĚ-ą»¶óĺý±óĺŐ€ A=ŞGVwŮ¡áŁçVôńÉji«ˇA=•łç%™tŕíw*^ňź¸ćŇߍ?»˙ďă­źý"în~ÁÁ A=ŞGÖzLÂŹţVA'ęĎűÍř«Ď˙^ĽőłŁ«Ľ¦|`\|éę ŹégI>ĘĆůĂ Ő#śp„—dŇ„±ďŹîćüyŻm\TđöÜŹś=¨Ż“v¬źęQ=‚z„łżtőEţĽĹŤőoOůŕú:iÇúů Ő#d™ÇĆ€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™tš&`¸üۡď˝ôž†HŮ˙3ízTʍGő¨ÉŠy3âOZ©!@=&ĽdŘĽy(âďrżŇ ő‡&Nś¨@=¨óŘ8CŞşşZ#ThÖ¬Yő¨QʍGFXmmmśy晢UUUńńŹ\C Ő#Śş1‡>¬J;v쎎ŽLýL/ľřb:t(ęëëăôÓOőꧦ¦&fÎśi2ËIYŹ‰ŽŽŽŘ»woLź>=jjjÔ#ęq„üň—żŚ_|1"">üág˘_TŹĐŁ»»;|đÁ¨©©‰«®şJŔ(×bUUU\uŐUú%eÂKNxßúÖ·â™gž‰sÎ9'–.]ŞóQÖÖÖ>ř`DôÜI^ştiÔÖÖj·ß~{ěÝ»7""fÎś7ÜpFڏçž{bÇŽńÉO~2,X Q@-ÂIĎcăśĐŇÁeDÄŢ˝{cőęŐŃÝÝ­q`”tttÄŁŹ>šűСC±zőęĚ®…­_L‚ËžŐ ßúÖ·4 dŔ“O>™K""6oŢ\đ602věŘѧsąś†Q$Ľä„ž 
ĄË„FOGGG¬^˝::Tđţ$ŔěěěÔH0Âýâ3Ď<Ź<ň‚QîÓ7öŇu«o„‘ÓÝÝ]ô¦Ţ>hţŁHxÉI1A»đśSâ?ןš[€ Ł31K—żvZÄ˙ýżźżvZĎż:t(ľńŤo¨KO>ůdź~ńÂsŽ7oŢmmm FAwwwÜsĎ=ů·'ť1&~ŁŞ°oFĆ>«ţFUäÇ©ťťťŃŇҢ`”śú—ů—©8‘ .?=óÔ8{ü8łjLüřőžm^</ľřb\xá…™:¬Nä‰Ů›oľ=Á›ćśż]3&~÷§DŰî÷Ô% “¶¶¶řö·żÝ§_śńˇSbď[‡ăŤźőĽçÎťQSSc˙Ya÷߼ňĘ+©ţńôńˇÂľ±»»;Î?˙|ŤĂÜ_~ď{ßËżýů OŤßýŔ©±}_O-ľňĘ+Q[[úЇ4Ś0+/9ˇ” .ŐZ #­»»;VŻ^ťßg/ .Ď>cLDDś}Ć>u™^ŰD,9+"â·ÎŚ‚~ńÓ3OŤIGj1˘gʼnýgaäôŢçňÓ3O‹3«úöŤVGĂđęěě,ŘşaîÔ1ńŰ5§ÄG>4&Î˙ŕ‚ů¦ą#Ś<á%'Śţ‚Ë„FV:¸ŚřĂźš.KŐe{{»CDŕőŢCoŇcâóžVđ1U§Ź‰›>^`:@ FF.—+¨ŃOMë JŇ}cz{‡G}TmÂ0Î%ÓŹ‹˙~]፾ô6G雂ŔČđŘ8'LgSIp™đ9Ś\mţÓ?ýSţí˙\j\T{JÉş¬:}Lüä_{ęrďŢ˝ŃŮŮ3gÎÔ0@˝÷ťtFOHYuú>{ú©câ‚IcâéÝďĹ»ďEĽűî»ńÜsĎŇ?üá?~ĽĆ„aĐÝÝ_ýęWăÝwߍ#«˘gťÖçă¦Őډţ×ĂŃőNOmľüňËĆ«0Äž|ňÉŘşukţíĎ_xj|pě)ýäÔßĎîíŁţô§?őř8Ś0+/9î 4¸LX #[›˙ńĂĄËÄĽs W™<óĚ3ńä“OjL€d«†ôáXźžY<¸LTť>&nšSx€–Gă`řÜsĎ=5Ú{Utş6Ó«ľöîÝ[ôTr`pzÄó©i§Äo×ôŻţvÍ)1wŞÇÇa´XyÉqm°Áe L---±yóć‚ÚlřÝĘjsƇN‰Îî}{ęňĹ_tT( .{ŽŐ{«†bÎxĎZĎżÚłSźĂ×G¦ÇŻ_ühá*ŻbµyĆűŹŽW÷îÝ«_„!rĎ=÷Ä믿=O)|vöi%?vęoډç÷˝?·g%ô믿^xˇF„ Ľä¸u¬ÁeB€ C«­­-{ě±cŞÍ:%^Ú˙^ü[Ϣ§ CzŽń§żWYp™č’óôSÇÄ9ă Ż®®ŽóÎ;OcÂ0óŘ8ÇĄˇ .!‡ˇŃűdăó?8fеůů Os 2 ŔŁŹ>Z\ţçúS\–ę CŁ»»;îąçžüŰżufᡠýůĂź’ďmíÇ¦ŁŁ#Z[[óojÚ)ő™˝oii‰ÎÎN ĂĚĘKŽ;C\&¬Ŕ„c®]»6˙vĎÉƧĆé§ŽÔ×KI*‡@…}cąĂ±*íŽř—7 Cĺë_˙zţńÔ_;-bÉďť6 >294$˝µĂÁŐ% ˛<ł–{\Ľ·©żQxVGG‡'` /9®'gC\¦'kL¸ädăäÔÔr'Äé§öěÁ÷٧ C)Ź<ňHüŕ?Čż=wę¸lÚ±÷ŤÓ&î?kź=ĽîsYŠý/ahęńţá""Ůúô¨ŔT/ą‘жű˝č9ôÇăă0Ľ<6Îqc¸Ë„GČa`’ŕ2}jęP—‰3«Â)ČPB[[[źĂ±ţđüÓ†ěëzć©qá9G‡‹>ř`´µµix€\.×çńÔb§d¬š®ËG}Ô¶*0€qkşżîÔ8łjŕ_çě3Ćħ¦­CŹŹĂđ^r\©ŕ2=(`B˙ş»»ă[ßúVap9ç´! .ÓÄt€©&ah÷-çÓ3Oíł˙l.—ó€ űÉcŮç˛ű_ÂŕÇ­ézśwîŕ#‘߯;µ żńŤohd&ÂK2o¤Ë„ú¦O6N‚ËÁR‰łĎřáľ5 'ŁŽŽŽ‚ŕrŇc†µoĽéă…ć=÷ÜcĄTŕž{î)¸Á÷ů ‡fetŐé=5źľ©÷裏jp(ŁĄĄĄ`ÜúéYÇľ-XşďÝ»wo´´´hhöĽ$ÓF+¸LŘJűĘWľRp˛ń˙uÁińŰ5cFĽ&"ÂÉf¸ö-'9@ëů}ďĹĎßµ˙,Tb¨öą,Ĺţ—Pą\.ßţö·óo7üî©ń»ż9fHę0}Ŕ]{{{Ěś9SßClĚáÇk˛¨wpYΤq6ďŘÂÄ˙öŹďĆÖ]••Ă9çśK—.Ťęęj(ÔgűÉƵĺ•÷âżżř«üŰ˙řÇăłźý¬? 'ĽÎÎθýöŰ VrÝréiĂ\¦˝zđp¬m{7~Ţ“›FMMM,_ľ\˝ärąXłfMţíOM;eH/ćῊçööRUUK—.`BJwwwÜqÇů=)Ď˙ŕ![ťXŰöËř—7ŹÎoľůf Cč4M@VU\FDěëŠř_ťďÓćç•—=w¶;::˘®®ÎŠ“NďŕrҸűyÄ÷^zŻĎÇž˙Á1Çôů«Ç?ľ^Ľ6í´Č(Ď<óLÔÔÔDCC?'ôäëßřF>¸Śč©˙ç{ďöůŘ3«"Ď9}P‡$µ÷_ź{7ŢtĘ1­Ś.öT™U=7#zžH>}zĚ™3Ç…“VďUĐçpL|äCĂ·ÍĘĽsO‰^űUţńń{îąÇÖ*0D„—TlćĚ™qĂ 7ô{'ąµµuHżď™gžŮזּ¶V€Â ďŹţčŹĘţ{KKË×ßüůó=účěěěS?ţéářńOŐçc˙.÷«řĘżř~íÉS •xôŃGŁ®®.jjjüq8)=řŕ[­ÔTŹéłÍѤq=7ăÇ?}/Ă ?f­“?Ľ§łł3ZZZbÁ‚ţpŚ„— ČĚ™3ű=Ý{¨ĂűYp"ůů wUČS éĐN6±cÇŽ‚÷myĄřv*+.řJčWŽ˙úżę÷ă6oŢsćĚq#á%Ŕ1Ş©©é÷鄡Ľą×ßS Ó§O·ę*đf÷{qfŐŔV_ľđÚ{Fđ`ô÷tÂP†—žJ€ŇjkkËŢLhkk‹7ß|sHľWą˝ź§OźnŐ% á%pB)w3!—Ë YxYWWçF łS4EÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Iş… IDATx d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%I§i˛Ş­­-žy晲łwďŢaůŢ«WŻ.űď555ńŮĎ~Ö ` /ɬ|pÔľw{{{żs饗Fmm­?Ŕ0ńŘ8™5mÚ´Š?ö×N‹8űŚ1Çôý~ëĚĘ?¶ŞŞ*jjjü‘†‘•—dÖýŃĹęŐ«  żđśS⣵}CĘß®9öţ¦9§Ç«ǡ_.x˙«#ţű‹żĘż]UUK—.Ťęęj$€adĺ%™U]]K—.ŤsÎ9'˙ľçöľov÷„•é˙†ĘŮgŚ)řşU§Ź‰żËő .=.0ü„—dZ±óŰ;ĎvĽ7ěßűŐ‡cmŰ»ńów{Ţ\Ś,á%™7¦ŕ`ô /9.Śd€)¸Čá%ÇŤ‘0˙˙öî>*Ş;Ď÷ýĹ đ˘@´FŇŠÝJeFs[â4šŽćÄMLśŐ® dťîĚ1ęŠFWǤŐ)t:D Đ´Ą€$j¸ŔŢTQUĘĂŢĹűµÖ¬–]»j˙öo>~żżÁ%€u^ÂVz3Ŕ$¸°ÂKŘNo—ÖCx [ęÉ“ŕŔš/a[=`\Xá%lífL‚Kk#Ľ„íÝH€Ip `}„—×`\Řá%"FwL‚Kű ĽDDé*Ŕ$¸°ÂKDśp&Á%€˝^""… 0 .ě…đ+T€Ip‰`ďŢ˝*((PUU“°µh¦‘Ě0wîÜ©ŻľúJK–,!¸DDs»Ý*))‘$µ¶¶ę‰'ž`R¶Ex‰çp8”——ÇD ˘577kçÎťfpiŔÎ/Ŕćš››•źźŻęęj&Q,^ž–}Ř‘Ăň•—_}őO :1‚K˙Üůqć`%8¤Ła"„­ÖĽL?yJq>O €%Ő¤¤¨îö”^żNçŕňÖhi©+ZS’ŁT^˙1ذläČ‘#ňx<ć×·FK+ł˘5fX“8„—`×\öŐÚ›ôÂK°˝{÷jĎž=ć×·‹ŇĘ3$|Ĺeuuµňóó™<€m^€ĹąÝn•””_w\&8IşĘ¤"á%XXçŕňIQZę _q™#ÝwÇ 
ËlÜó9ó7Śđ,¨ąąYůůůŞ®®6ŹM;HK]×çO,‰Đ`°–› .$T^€…TUUÉív—‹ľ?XŮăů·&ŔŔCx QUUĄüü|µ´´Ç~ś9Xw9 .á%X@çŕňÖhéˇď\6ÂKčgGŽŃÎť;‚Ë•YŃ3,ŠÉ h„—ĐŹNž<)ŹÇc~Mp @ú ůoĚ#IwÜEp @;*/ Í™3GUUU*))‘$ť¨mŐoŽ]ŃŹ3+f!fOąxń˘ŢxăŤ.Ď1b„˛łłĺrązäš•••fUísĎ=gw»Ý:{ö¬rss•šš*I*..ÖˇC‡4nÜ8ĺĺĺńŔ á%ô3#¬ň0·6]ŃʬhĚâőzőüóĎwűyĽőÖ[=vÍĚĚĚ€đň±Ç“$ĺććšÇŠŠŠôĆoś ĽKČËËSbb˘öîÝ+Iú˛QÚz„ł§K’233µeË–€×*++Í˙óx|¸YuénOU}@¤ Ľ‹X°`ÍVă/Ą^aźŕú‡‰ťĄ¦¦ę…^ÇăąéđŇ*ýÉp!Ąq. ö€…dee´}Ą­óÜĄV&ç&”––JR—ÁĄ˙ë•••7}ÍC‡I ^^k€6„—`1YYYZż~˝bbb$u'j 0o„Ń.]»-űâĹ‹a_óx<şóÎ;Ą¨¨(Ť?^Ź=öXČ÷!eçk!Ş˙±˘˘"Íž=[«V­ řŚŮłgkáÂ…’Ú*3-Zd^{öěŮ×č|żŹ=öĆŹoŽó…^Ô¶YĐěŮłĺv»ůĆ` ´Ť€9ťN­YłFůůůjiiŃ×WdîB~—łëw:w©U˙Yg˙ ó«¦ďzäs‰ˇI’|đAóŘĹ‹ÂÂěělŤ1B^ŻWn·[ĹĹĹúôÓO5bÄ kfgg‡‹ĺeQQ‘Š‹‹Î-..6Ź˝ńĆZµj•†®ěělUVVšaf¨ëΞ=[/^Ô¸qăĚóźţyUVVęÂ… *..fc ¶Ax e;věPCC$éťŇ«’6ŔląÜŞýă&ĎO¸ ±3·Űm®7jTŚĘƢ˘"ł-<ś'ź|R[¶l1ż~ăŤ7$) ô—ššŞáÇëoű[@dW›ő\O5ć“O>˛JŇřüÔÔTósŠ‹‹5|řđ ŕŇŕrątöěYŞ.Ř á%Ř€`îÜąS%%%’ÚĚŻŻ´­3$*č=‰‰‰Z°`Á€ž7Ż×kn¨®m|Ö¬YJMM5˙×˙˝•••>|x@yg.—+ ˘Ó$‡đy]µ‡‡«Ć DŻůľĘ.\¸đšU•ětŔN/Ŕ&‡ŮÎl'j[µµéŠVfE‡ 0:˙ Ď»+\›wgť« C’/^ ąăy¨křWcú‡źťďéZUťťmćT^°“ALŘK^^žîąçóë/Ą­G®¨ĺr+“ÓIwB˝pŚŠÍ®*CUY†ZďŇ8–™™yÍÍz®š†ú|C¸÷\Ľx±Ë÷€U^€ -Y˛Dąąąć×_6J˙úÇ+:w‰ąńםb†bě0îßVn’ת°ôŻĆ t†s¨ő4;_§3cĎĚĚLľ)Ř á%ŘTVVV@€ŮĐ"íţËU&ĆŹôÝHxi„gĎž `ËăńH’ŮÎ/u§q˙jLˇÂOˇŞI;oÔů|că!ZĆŘ á%ŘXVV–žzę)ĹÄÄ0ť!źtcˇÝÂ… 5nÜ8UVVjőęŐŻkѢE’¤çž{Î ݵ{řŤVcv~Oçő0Ťđ´¸¸XŹ=ö*++UYY©^xAłgĎ6ĂWÂKvCx 67qâD­Ył†ł“pAâő(**ŇđáĂĺv»5~üxÍž=[wŢy§>÷Üszţůç®éVVVšááµv7Ţ®˝;Üž©©©zë­·$ÉëřńăőüóĎTç˛Ţ%»!Ľ€ŕt:µfÍ%$$0~˛łłÖŁĽ^.—K^ŻWO>ů¤ącykk«rssőé§ź—á®yńâEegg„ţçúŠĆą]Ť9Üëyyyúâ‹/ôÜsĎ);;[O>ů¤ţđ‡?´´Sy Ŕn˘™ N§S6lP~~ľęëë•••5 çcŐŞUZµjŐMNjjŞąá͵„ 3].WČŤtŠŠŠ‚Žĺĺ儍ݹ'˙]ŃSSSĆ`|ýŕňCŔv¨Ľ€âp8´aĂ-[¶L%%%:rä“áĽ^ŻFŽ©;ďĽ3äëƦB7S ý…ĘK0GŽ1«S§NÉĺrÉáp01ĘظÇŘUܨ°,--Ő–-[TYY©ĚĚĚ.+:ŔިĽ€â\ŞŞŞ–ššŞçž{NR[[ą±YĎÂ… U\\¬ĚĚĚmë`T^@„Řąs§<ČD @Ď?˙Ľňňňäv»UYY©ĘĘJÍš5K.—‹vq¶Fx Ŕív«¤¤„‰ŔBmÖvGx 6ÖÜÜ,ŹÇ#Ż×k›>vÎ]jŐ——Z™ €­±ć%ŘTssłňóó‚ËĄ®ÁЉ&¸Ř•—`CőőőÚ±c‡Ş««Íc÷Ý1Hó'frđl¦ŞŞJůůůjii1Źý8s°îrRL,„—`#7\ľţúëLŔ–/Ŕ&Ž9˘ť;wšÁĺ­ŃŇʬhŤĹä⸅ż`ô€ 9rDʧŰÁĺÔ”Áş•ž¬Ű‡EiJ2˙™űăW[°¸˝{÷jĎž=ć×·‹ŇO¦G+!&ü{˛ÇRöx‚ €˝^€…ąÝn•””_ß>,J+g VĚćů/Ŕ‚š››µsç΀ŕňIQZę¬!¬c/Ŕbš››•źźŻęęjóŘô±´Ô5É (,Bp @*/Ŕ"ŞŞŞTPP úúzóŘŹ3ë.'˙Î/ŔŞŞŞ”źźŻ––óÁ%` ă·b°Ż×\.ú>Á%üf ŕrąc~˝ű/WőIŐwL `@#Ľ p:ťZłfM@€ůN)&``#Ľ‹p:ťzĺ•W4věXóŘ;ĄWµű/W™Ŕ€Dx âp8´fÍš€óĐßémď&0ŕ^€ĹćŚ3ĚcG«[ő›cWÔrą•  „—`A‡Cyyyć‰ÚVm=B€ 8/ŔÂňňňtĎ=÷_Ů(LŔ€ÍŘCBzş†ÄĆvëÜşŇŇ^Çĺ¦&5”—_óřĐřxŤ0A’tńĚ}ÓŘČîӒ%Kät:ĺńx$µ/Ľ˘•YŃ3,*ä{}ńťöťĽŞŻY*`c„—61mĺ %efvűüŞĂ‡őů®wUëőöĘ8ęJKő»'W]óřČ´4Ý»eł$éŁU«{|<čyCăă5%'GǶog2,$++K’Ěóë+máĚă5—z†Ăá`ĐohŹPΙ3uď–ÍrÎśÉd Ű’].=řÎŰĘxd1“aAYYYZż~˝bbb$uźT}Çäč rą\Lú •—6ôďłf‡ţ#=]·ÄĹiŇ#‹ĺĽűnIŇŚ§¦ż––ö[»¶Ż®NĄnŹůgXŰh—KCăă™ s:ťZłfŤňóóŐŇŇ˘ŻŻHď”^•$Ýĺ ýďQO=ő”&NśČäl‡ĘËŇP^®ZŻWĹ?_ŻŞŹ?–ÔÖőęó÷ޣ¶9­YłF;věĐéÓ§%IG«[%`"áe„ęjÝ¡ńńĘ~éE%…X€?!=] +VhÂĽy*ŮôZ@Ŕu#n‰‹3ŻsK\\Čă ééš»ůő 1'¤§+kÝÓť™©’×^ 9ÖPďKrą”är©|ß~Ĺßž˘¤ĚLŐzKŻkÜ ééĘ~é%34ě<Ć5ÂŤÍ9s¦f<ýł ±9gΔsćL•Ľö óţCŤ-}ţ|ÍxúgAÇăR’•ńČbMwźţ¸acĐî펤¤¶ĎŤŠŇÔĽm ň’îtijnnČ­»Śŕň[źOe»ŢŐ_˝^Őz˝íU)š’›Ł¸ääcKHO׬—_’$}ëóéĎŰw¨ˇ˝*ÎßPÁ¤Á?¸ôŐÖęضíşPQˇË>źś3gš×ľwËfýÇO~2`1a‚’23ĺ«­Ő OˇY 9vćLe,~XRŰfNFxyćĂőWŻWćݧ´ymóöŃŞŐmchßh)}ţ|sNŤgUëő*.%EÉ.—ůĽţţ‰eŞ:|¸ß6‰¨ňňň$É 0„—(ŮĺŇ”śóëĎßíhOź?ß .KÝw»Í×ĘËŐP^®ćş:łjđ–?ˇ#›^ëŐńŤŹáÚĂÂE˙óIŇŘ™w„SssĚŕ˛ó{Ťć{·l¨öě®ôůóÍŠËăŹĘ~»Ë|­Ö땼^5ś>­˙ň?~-I‘–đ~Łţ[źO­Z4¶ęÇÍ÷†š‹żb™$éBy…>Z˝: ,ß·OU‡ëţ_ż©¸ädM[±\żk;N¨÷×z˝úÖçSf^®†ĆÇ+ŮĺR­×+_MŤ|55íW}ŮąŞÓÄ/”W|OřjjT^SŁo5ëĺ—44>^Ł33Í`}'//OßűŢ÷äńx @D Ľ´ˇÎ­Ŕ’äHJV|J˛b“’ZťËv˝°áć’ÔVŃç\ú+ß·OŁ3§*mŢ<ĄÍ›§?oßŃ«UtU˛zĐWSŁ ĺ™žBŤŹ7«Ëv˝ň˝ ĺĺ:îńhÚňĺ×=žoUWZÚVué\vţüo}>ݧ„;ŇÍăq))f»őqŹ'ěŘĘv˝kV@ú›0ホjŇmŰBÎű7ŤŤ:á)ÔŚ§¦$—K éé!ŻSęv‡|őáĂć÷Đčöđňz´*tKrŐáĂ:¶}».ś.7«5Ń÷˛˛˛$) Ŕt:ťL Ŕ–/mhj{{čµ”ízWǶm3żŽKI1«.+Ú۳é:ü±öv]ç¶vß6ů‚ŽŤÎĚ4˙\ÝŸÎě˙đ†Â˪ÇŻyżCăăĺ«­5çÓŕśywŔ†ŰţáĄ|~ëóu*úż–t§+dxů×ŇĐë|Ţč:¦ľš%ef*!=]?Ú˛YeżÝĄż?† {Ń·˛˛˛”¨={ö(++K‡IŘᥠՕ†ß|ĆWSŁ Ş:üqĐ®ĎqIIćź/\#ŔňľF¦§÷jxŮ|ťUz#ýĂ®ľžŞMĘĚT\JŠâ’“42=]#ÓŇCnä#ICbăžE8áÄŘögÔÚÚjîö}-ţ×ěŤű7÷Ę9s¦ąŮ’´Öy˝Ş:|XuŢŇ›Ţŕ 
=gâĉr:ťŞ®®f2¶ExiCż{rŐMĆeźŻË×űrł•®Bľ›őMcc—;݇—’˘)99a7ýńŐÖę–¸¸ 55ý[Čo„QÉiěhŢÉwştÜÝ7Ďé?~ú¸¦­X.çݦţAfCyąţĽműu·˘Łçťź.TT¨öSŻšëęÔpú´ĘËőŁ7¶(ÉŻ…]jk÷önÔ…ňŠ€–˙®|{Ť ş'ůjjTüóő/çĚ™ť9Ő¬Ć4ćîŢ-›UĽ~öôŁ˝{÷jĎž=ć×UUUL Ŕ–/˙käé]VÇ%űUý5[ló˙ń$w±áĚŤ—’4ő±ĽŽMs¶o»Žcě褠c—ýÖčŚKI [UÚy­LC]i©’23őm“ĎŇŐ‹ß46Ş|ßľöŕ_SBzş&-~Ř\'uÚňĺ„—ýńłŃܬ‚‚ť:uŠÉD„ALÁŔaě-éš-ÉŁý^o8}ÚR÷áęŤîâ>ś3gŢĐç•“U6¸rÝËşO;Ć–ÜĹŘ’î ýšv&efvľĆĄ¤hÚňĺĘĚË „ö¤ˇńńĘXü°¦-_ňľĘËudÓkŞúřăöń%ó×ÇŞŞŞôěłĎ\" áĺcTĂ9ďľ;lč54>^“~HRŰÚŽVŰ„ĹWSc†d“~Hq))!ďaJNÎM]çŰ.ÖýśřđĂ!Ź7”—ëBy…$iJNNȲ«±ťńŰ~JnnŘëOÍÍQĆ#‹»˝óüÍú¦±QÓV¬PĆ#‹5aŢ}74gč=ÔË/ż¬––óŘ’˘€í^0Ç=…főĺÜÍŻUŃĹĄ¤¬÷xlŰvkŢÇ[nImAŕýoţJióćih||ŰZŚwß­űß|ó†«˙|µµ’¤±aŢŚĹ+3/|°h¬U—’¬ßy[λďŰďĽ¶Ş˛Öë5w“7*;ź›áמ]WÚ{;|•«Ćő+öď—$ĄÍ›§ŚĹ‡<ß—q.z—Ń&ľsçNóŘ­ŃŇŠ5k˝ěŹ5/_MŤţĽ}‡ţaůŻ{·lVCyąąű¸;y©ŰcŮu ĘËUňÚ/4ăéźih|Ľ˛Ö=tNĹţýfv=ŽmŰ®Y/ż¤ˇńńú/˙ăת:|XĘËĺHJV˛ËĄ¸”důjkuˇ˘BλďZű˛Öë ۬&vňřIDATyąË±]č>ZżA÷nެ‘éiĘxd±2Y¬şöVůؤd3”˝P^ˇCë7ôčĽúŹeÖË/I’Ęv˝«c۶éĎŰw›óL[±B“^¬¦ş¶ wH\śôújkőçí;řaëeUUU*((P}}˝yěöaQZ9c°b†D©Ľţ;& `{„—Půľ}j8}ZÓV®PRffPuá…ň •şÝ–ßpŸŹI‹ŘńÚü7^V>¬cŰ·kjn®n‰‹“sćLł ń[źOĄnŹNľűnŰń»ďV\JrĐĆAĺűöÉWSŁIŹ,Ř}Üqcl—;íţMcŁ>Z˝Z˙°ü óśÎk”Vě߯?oߡoz¸M»ęđá Đ7áŽts\˙ńÓÇ5mĹróľ;W·Vě߯ăžÂ۱mŰ52-MqÉÉšš—§©yy!Ď+yí–ß ýçzŰÄĂiűĺ_öDx [ú¦±Q{ňe<˛Xi÷ÍÓČôŽő-żőůTuř°Îě˙°_ÖdôŐÔčź>®‰?¬ż›93äŘŽ{ iGXě&@~†­•ývWźWVvÇ7ŤŤ:îvë¸ŰÍCB·Ýl›8‘†đ, §ÚÄ$„—ĐĎh 4~3€~äőz‚KÚÄč0)`E„—ĐŹ\.—rssÍŻżľ"ýćŘíţËU&0ŕ^@?ËĘĘŇúőë•`;ôĹwúšËjha~á%X€ÓéÔ† ”™™iű˛QúšË:QŰĘ$ÂK°‡Ăˇ'žxBŹ>ú¨yŚ6rŔ@Fx 3gÎÚČÍ€őmän·[ĄĄĄ’:ÚČ—ş˘5%9Ş[źsîR«Z.3źD‚s—ŔŔCx e´‘věX­]»– Řmă`3ěF(/Ŕ†Ś6ňĚĚLóŃF~˘¶• DÂK°)ŁŤüŃG5ŹmäűO}ÇlŹđl.Tyy=Ő—ű#Ľ€ŞŤ»#Ľ€ŞŤ;#Ľ€ÓąŤü¶ŰncR¶Í@ä1ÚČëëëĺp8€-^@„ŞŻŻWAAęëë•››«¬¬,&`+„—öîÝ«={ö_9r„đ`;„—AŞŞŞäv»U]]ÍdlŹđ"DçjKěŽđl®ľľ^;v쨶Ľ5Zš’Ű!Ľđz˝r»ÝŐ–#c¤źLŹ[m™ŕ$é*“„3$ŠI,Žđ,,\µĺ?¦FiţÄÁ]ţÇVBŚô_§E«úoě8t–č [Ŕ:/Ŕ˘ÂU[.u Vzâ n}Ć”ä(MIĚdl‰đ,¦ąąY{÷îŐÁŽw§Ú€HBx rňäIy<Ő××›ÇnŤ––ş˘5%™Đ0°^€„«¶üAR”–ş¨¶ L„—ĐĎŞŞŞTPP@µ%ť^@?:yň¤^ýő€cT[ĐfSý§şş:čج .á%ô«9sćhĆŚǶ•\Őîż\UËĺV&0 ^@?ËËËÓ˛eËc;ôĹwú×?^QyýwL`Ŕ"Ľ pą\zĺ•W”™™ikhˇ 0°^€E8=ńÄ!«0·ą˘s—0 á%XŚQ…yÇwÇľl”ţőŹWôáiÚČG4SÖăp8´víZ-©Ł sţÄÁşďŽ® č_üżGĹÄÄhěرL"ĐC‡˛˛˛ —^€Ť8ťNmذA{öěŃŢ˝{%I__‘v˙ĺŞNÔ~§»˘•üľ±cÇjíÚµL ŔVhZ°`ÖŻ_PMiTa~RĹŽä€Č@x 6eTa>đŔ汯ŻHď”^ŐoŽ]QËĺ(& `k´Ť€Í-X°Ŕ\ łˇˇA’t˘¶U'jŻ09[łUxYꍦań<5–Ô×oמ8q˘ąćď˙{ "Ř*ĽĽp["O Âp8Z˛d‰\.W@&veů5/ďĽóNžŰq8ývmŁ sĆŚć±Űn»Ť‡°ť¨ÖÖÖV+°ąąY^ŻWőőő<-¶(—ËŐŻ¦áäɓޮ®–ËĺRb"Őë{±|x ``İ"ÂK–Dx Ŕ’/Xá%K"Ľ`IŃLě ąąY{÷îUUUUź^711QsćĚ‘Óéä!ô1ÂKŘ‚×ëŐÁűíúyyy<€>FŰ8lˇľľľß®ýŐW_ńú•—°ťż›ś¬ż›śŇ«×řŰWM:Q|ŠÉčG„—°ťa·ę6çH& ÂŃ6Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%KŠf Đ×ęëëU__Ýďé/---:uęÔu˝'11Q‰‰‰ĺőzUPP`«1WWW+??˙şß÷ŔhÁ‚ÎáŁâ4Á5V;ßŘ­ó›/}­–ĆoĚŻcbb´dÉ8ŔMjmmmeĐ—š››•źź`ţýŹ2ôwßO±ĺýüíĽO˙{ç˙Ń•oŻHj .׬Y#§ÓÉø ´ŤŁĎ9­YłFcÇŽ5Źýźß•é˙űKŤíî…ŕ ÷^˘_DB€Ip Đ»/Ńoě`\ô>ÂKô+;—}đýÎN&Á%@ß!Ľ„%Ř!Ŕ$¸č[„—° +—}Źđ–bĹ“ŕ ^Âr¬`\ôÂKX’L‚K€ţEx ËęĎ“ŕ ˙^ÂŇú#Ŕ$¸°ÂKX^_—ÖAx [č‹“ŕŔZ/a˝`\Xá%lĄ7L‚Kk"Ľ„íôd€Ip `]„—°Ąž0 .¬Ťđ¶u3&Á%€ő^ÂÖn$Ŕ$¸°ÂKŘŢő—öAx‰Đť“ŕŔ^˘Z[[[™DŠććfĺç竺şÚ<ö÷?ĘĐđŃń—6Cx‰*ŔŚľ%šŕŔf/‘BÁ%€ť°ć%"R¨50 .ě…ĘKD4ŁłľľžŕŔf/Xmă,‰đ€%^°$ÂK–Dx Ŕ’/XRt¨/^Tii)ł OdffjÄÇ‚ÂËĘĘJŤ?žŮЧľřâ Ą¦¦š_µŤWVV2Kú\çl2ş«“căš0)•YĐ+Î|^©¦ĆćŻu^N”Ş× 72zĹÓ9/ęÄŃĎBľĆnă,‰đ€%^°$ÂK–Dx Ŕ’/XR4S¨˘¬R˙y´LŁÇŚŇčŰoSZF*“ô^ŻĽ|:çEÝźńOzlîĘkžű«W=ş?ăźôâŠ_öȵëÎť×ŰŰv_ńĐ:ÝźńO:ţÉgÇ_¶@+Z§_˝ęŃK+~©•­ëňüľňöö]ň]j <¶m—îĎř'˝ţLĺľ©Śg^äů€ź0ܰ^/ż8yVR[řŃîC]ž{¦¬íÜ©Ó'ßôuKŐʇÖÉ×úů.5éLYĄ$i¤qçhß9óµtůb=3?ěů}ˇîÜy=6wĄŠ<(nXlŔkź÷Ď®ç™Oąk2?a¸a˝Ú6^wîĽ|—šďPScłŢ˙÷}şwQvŘóOm«lśĐ­ÚE…űä»Ô¤ “‚?k“gŁ$‚ÇŰŻ=wa¶ţů™\ó¸ďRSČóűÂńO>SÝąóš"Ě]ş|±ćÜoąđŇxć’hąŔMéŐđŇhł6ÄG?ÓńO>ÓÔyíŐŤ’Bľ~˝ŚężÎá^ܰ؟oT}v® w~_8Ń>ˇ®oŐ`ĐxćS¦Su €›Ó«áĺ™öÖć¤1Ł4ĺ®É:qô3~2Ś;aťˇ+ +Ę*ő§ßSݹ󊋏UŇQšłđŞ!Oý, úŻŮ׬G?3Ă´Š˛J5űš5~â8Ĺ ‹5«=Ť˙ŤŠjűóčŰG)į óýů.5é`ŃĺklRÝąóJ3Jsf+į÷ęü)Ó'ÍGÝąóúë—çu¦=€UűŚ1Ż;â!CĚ’ÇtćóJťůü¬&LgŽ+ÜĽúßź˙<'ŤĄ˙ďů×Uqj<ó ăĚű­řĽ2ě˝óňĹÉłaď§«ű­;w^Š©éRł|ŤM×|°ŹŢ 
/Ë:ŞgĚ™¦7ăúÓÁŽ`,ŕÜĎŤsSŽű.5éĺ•ů!7Ëy{ű.­ţ—˙¦s§›ÇžÎy1ŕś§s^Ô„Iă´m÷k’¤—Wć«îÜymňlÔ„Iă‚Î76ŔٰuŤ’ĆŚ 8ß?xűh÷!ýzSaĐF:ďîÓO×ĺµÇżůjˇŠ Co`3ő®ÉZżuŤţęUŹţtđXÇ}nŰĄ·%m}o“â†ĹęŔîCz{ű.=3? Ě«(«4Çk(9pTR[ý&÷†  ňÍW uâčgÚúŢ&˝łýÝ 1ľ_¸O˙ĎG˙˝Ű¦ńĚ%iĺCëĆ"IŹŻËŃÂÜűŽŘ}Hon*Ô”é“őZáĆ Ď,*ü@ďî şßŹvŇćg7,z{Ű®×€˝ôę†=ţkXĆ ‹ŐŚ9m!c¨ŔĎ„hóö]jŇşĽ—tü“Ď4aŇ8mňlÔe˙KoŘjn¦łůç˙fdĆú”?ś3MRŰú•›<µú•ećëĆąFąÉłQćĚ7Ż˝ÉłQ›<5ĺ®É!Ď—:BłÖÖV-]ľX[ßۤ­ďmŇÜ…Ůň]jŇŻ7„vooŰĄ˘ÂďĐęW–ië{›ôÖ­Ú°uŤ&L§ăź|¦×ýB¸ĄËëńu9’¤Ń·Ź2ÇdwťŰńĄ¶ŕň™Ľ—TwîĽ~8gšŢ:°U”ý/m}o“&L§3e•Z—÷RŘg´ůç˙¦#ŽjĂÖ5ćýLÔV=ů~áľë~ćďîÓřIăĚ{]ş|±$éÍM…AA´\‡kĎďÁ;î·îÜym~¶@±ńóűbç˙űsŢB]öŇkáe¨5,ŤđOż?T±hěčíżćäŰŰwéLYĄ*ź“4f”ţů™\ýpÎ4ů.5™a¨±>eÓĄfIŇŚ9Ó4ő®Éfč׹5=řüéšzW[ksܰحěuçÎë×› %IO˝˛LKW,VZFŞŇ2RőÔ«ËĚŔĎÎęÎť×ŰŰŰĆ·ÉłQ÷.ĘVZFŞ’ĆŚŇŚąÓőxűć@˙y´ĚĽ†uᄌqć f(ě7®Í?˙7ů.5éÁśůÚ¸m­YŮš–‘ŞMžŤŠŤwčLYĄY‰))0ÜkmŐ¶Ý›4cîtó~ڰą»! ˙3ź»0[·­5ďuéŠĹfëúN»ÎźąĆîäĆýúĎń sî7ŹÇ ‹ŐÂÜűÍďł’Gů °±^ /Ci©š2}rP5ź8fwuçΛçlض6dŰňÂśűÍs®f×rŁÂŻsHv&Ěć>ˇÎ/*ü@ľKMš2}r@»şáńgrµtů‶řMžŤÚúަ.7ŮqÄ9B^;­S˝ĆĆw¬˙řŃîC:SV©Ń·Ź Ř)Ýŕ_őj|®Ô‡›c_c[ŔÜÝ ‹ŚĎŽŤwčńgr‚^źŃ^ë˙Ľ|—šĚq„ZďÔ˙~C­cYŇľ>§żą łµaëš°ë|ŔzmÍËpkXÎ]”­G?Ó˘CZş˘­•جşôۡڨüáśi×Ü|Ĺ+ĄŽ 4TŘŞÝÚ˙úťĂÎPçkQ.lŻîë¬s•dŇQć8ŚÍq¤¶ŻîÜy3äí´N?ś3Í ŤŤĘQŘ[ď…—aŞď]”­··íRÝąó*9pT3ćNď¨2:Î=ŢŢu®< Ĺ?ôě¨â ~ß!Ćd”Ćîâ]ť_QViVůMéf5˘ďR“ŢÜT¨? n•—ÚBV)°şŃ­ÍŕjĐĘ óĂťăhl űŚŚŠČÎBµ§w癇«xl q˙f…nFčkŻw®ţLÓ¶¨±áĐ݇t`÷!Í]”Tů {ęµ¶ńpŐŚRG…ŕGEmAgÇąĆ:”]rF°;¬Łĺ:Üć/uçΛáˇU^Ç8Ç]óü¦Ć¶1…«ěĚŘpčŔîCrÄ9ô`Î|­~e™6y6šęźăźՆギ㿻q CWŐ†'Ú×Ô4ÂR˙{5ÇţëWv·ŠŃřĽpç´°Xa„±Ó»^ď2T€š–‘Ş× Ű7Xj˙݇Bîrűé•ĘK˙5,C­—8wQ¶Ţ޾ˬFějÍĂ®”üľ­…ŰXĎQ żůK¨Öt)üÚ’áΗ¶ŞŻ˘¬R_ś<«ńÇ)-#U%Ź™kQnŰ˝)(4ZÇ;ßű‰0ííRđzžÝ éęÎť7ďÇX§Óżâ´«ví)Ó'_÷3Çhą÷6_U·©!?łsĄ«ďR“ľ8yVޏŽ5?§Ţ5YS 7ęř'źéĄż”ďR“~őŞG·­ĺ'ŔĆzĄňŇßÂ_ţČĽůjŰÎÝţĐ„ú¬ÎJŐ™˛JĹĆ;ڞáÇʆnO×rî|©ŁťĽł_o*ÔëĎ!ťŃâ}o5 ‹ ? ŐA÷˘ĹÝΙo¶ďŽ>eúdó}×j×ŐžŢ˙ÍBµÇ´űęÎťWlĽC÷.Ęs¨ŕúťö]ÚýççĚçgőt΋z&怒ó§Ţ5YO˝˛LRGĺ.ě«WÂK3|Ë_IilÖó§öęÉÎU†F y čPPVQV©Í?˙·¶ĎYľŘ ¶ü«˙µ[ű·'wą¶dó§Ţ5Y±ńů.5U~´űŽň™băŰÚĂ%).ľ=p v~´űą›zĐZ›aŞõ¸!±RvľŽ±ůŃSŻ. zFáÖíÜž~-Çý6MňßIŢx^żn›.óÎëĎ­ëţ÷ëß.ŞÚÓÜń|źn›ë•¶ń/Âěí/iĚ(M™>٬|ě\á7wQ¶Ň™ĎĎjĺCë´0ç~Mȧ’ÇĚ@sîÂl-Ě˝?ä睏?“#)¸ÝZęzmÉPçKŇÂśűőöö]zyeľ–._¬ŃcF©äŕQŘ}H±ńmňl4?ë‡s¦éíí»Trŕ¨^¶@3ćLWSc“JSÉŁš0iśÎ|~6(h3Ő÷˙}źNýL?ĽgšŇ2Rö¸?ţLŽÖ徨’GőŇĘ|Í3M±ń±zżđ3P}ü™Ü€ÔxFáÖ w˙×zć«_Y¦ÍĎčĚÉłš0iśš.5PtHżzŐcľ6úöQZý/˙- 3>ĂD‹ ?Ř%\jkM÷đ­-î|©­bÔ×ؤ÷ ÷ŚiĘôÉZşbq@wZFŞVż˛Loľę1wĂ6ƹɳQuçÎkółA-ÎK—/6CĎ’GőĂ{ÚB˝pëy¦e¤j“gŁ6?[`ľÇ\Ź?“0.˙gŞ-Ľ«Ř»zć&ŤÓ˝‹˛ućóJ˝_¸ĎGlĽCŹŻË 4oňlÔşÜu¦¬Ň¬,ť»0[Ź?“Ł×ź-yż˙üL®ââcőŃîC**üŔlżwż°§¨ÖÖÖV˙ĹĹĹš={¶¤¶ čµÂŤý>H˙Ťm­ŤéŻ˘¬RMŤÍJÓ˝đíFř.5u´(_cL×sn¨{îîş“ţ÷.©Wďż'Çď??×;f«Ü/nĚÓ9/šŽřĂ4kÖ,óµh;ÜŔő†R}Qu7,¶Űˇâőś{Ł÷Ü—÷ŢÓăż‘ů±Úý ç b Xá%K"Ľ`I„—,‰đ€%uąŰx“ŻŮܦzZ“Ż9ěk]†—gĘ*őtÎ‹Ě €>GŰ8KŠjmmmí|pŐŞUňz˝Ě€>árą´eË–€c!ĂKčo´Ť°$ÂK–Dx Ŕ’ţč QZIEND®B`‚ceilometer-6.0.0/doc/source/5-multi-publish.png0000664000567000056710000012167312701406223022530 0ustar jenkinsjenkins00000000000000‰PNG  IHDRH – \ŰbKGD˙˙˙ ˝§“ pHYs  šśtIMEß :2MCŮ IDATxÚěÝX”uľ?ţ'í®3ź…ˇ´śŰÍ:¶ ŮQÜ2ÇOÉţčě2ÖvZµs5ČîÚ¶u˛ł§ÎI®SşmV3çłuvŃ®†p7uŹ9lĘt6űtck-94¤Ň×Ń H»öă÷Źńľą‡ągůń|\WWŔĚ}ßďű}ß÷“÷űőN¸téŇ%MbW± h˛c@BDDDDDDD“"""""""šôѤǀ„&=$DDDDDDD4é1 !""""""˘IŹ Mz HhŇű:»€®¤ĆĆFř|>ř|>čt:dggC§Ó±chTq Ń$R\\Ś„„$$$@Ĺďu:ťň{`łŮÝżŰíÚĆétŞľĎç󡼼©©©0 0Ť0™L0 ČÉÉÁÚµkĂn ‹%č8j˙Ą¦¦ş""" """˘IÂív…V«5¦íG\Ţ#Š"rrr`µZáóů ×ëQYY‰ĘĘJÉű)..FUUŐĎ×çóÉű‰&Ü!"˘ÉŤSl†ÉëőÂëő†|˝µµ5äkyyy!_ËĚĚÄ´iÓŘ‘DDD4â¤ŕ‘GAmm-ęëëáóů ŐjŁÚ>š÷×ÖÖBŁŃŔď÷«ľîv»Q\\,#‡#d:ŤĎçŮl†Ýn‡ĹbÁĄK—`±XT÷WTTv„(Š0™LhnnFyy9ŚFcÔçJDD“’(x<ôőőˇµµ˝˝˝đx_Đô 0ŤEš cłŮ H´ZmÄi6Ęé5áH#8;–’ŃhDyy9|>_TAÎ@R»ÄŐ÷¤]39ů7`¶nžj@qĄÍÉÎĹśě\,şĺ6Ŕä”űN¶üŢĎO…śkCC233qÇw@Ż×łĐ+Ń$7pôÄ`0 ;;[®ńnĄI¤i6őőőČÎÎŽj„ÇPCµ"ŤŤŤHHH¸ťFىjůa""šÜ&l@âńxpŕŔ°ŁEŇ®™Ťë„[“R4©ăęܤŔä梕¸đeNµźŔÉ–ŃrôüžŽŽŘl6$%%áŽ;îŔí·ßΠ„hRNÍUÝa2™PUU%/©I¸i6Ň4–Gy$Ş6 µXj¬ÁŠ4bĆb± 9”!"˘ÉcÂ$---ŘłgOČj0@ –HNţ ¸N¸%ćú!cUâÔ$ää߀śü°t…-GŹ ů]'zş|Spđß˙ýß«WŻćô""˘ID*Ś 
[binary image data omitted] ceilometer-6.0.0/doc/source/testing.rst0000664000567000056710000000531412701406223021262 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running the Tests ================= Ceilometer includes an extensive set of automated unit tests which are run through tox_. 1. Install ``tox``:: $ sudo pip install tox 2. On Ubuntu, install the ``mongodb`` and ``libmysqlclient-dev`` packages:: $ sudo apt-get install mongodb $ sudo apt-get install libmysqlclient-dev For Fedora 20 there is no ``libmysqlclient-dev`` package, so you'll need to install ``mariadb-devel.x86_64`` (or ``mariadb-devel.i386``) instead:: $ sudo yum install mongodb $ sudo yum install mariadb-devel.x86_64 3. Install the test dependencies:: $ sudo pip install -r /opt/stack/ceilometer/test-requirements.txt 4.
Run the unit and code-style tests:: $ cd /opt/stack/ceilometer $ tox -e py27,pep8 As tox is a wrapper around testr, it also accepts the same flags as testr. See the `testr documentation`_ for details about these additional flags. .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html Use a double hyphen to pass options to testr. For example, to run only tests under tests/api/v2:: $ tox -e py27 -- api.v2 To debug tests (i.e., to break into the pdb debugger), you can use the ``debug`` tox environment. Here's an example, passing the name of a test, since you'll normally only want to run the test that hits your breakpoint:: $ tox -e debug ceilometer.tests.test_bin For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests 5. There is a growing suite of tests which use a tool called `gabbi`_ to test and validate the behavior of the Ceilometer API. These tests are run when using the usual ``py27`` tox target, but if desired they can be run by themselves:: $ tox -e gabbi The YAML files used to drive the gabbi tests can be found in ``ceilometer/tests/gabbi/gabbits``. If you are adding to or adjusting the API, you should consider adding tests here. .. _gabbi: https://gabbi.readthedocs.org/ .. seealso:: * tox_ .. _tox: http://tox.testrun.org/latest/ ceilometer-6.0.0/doc/source/webapi/0000775000567000056710000000000012701406364020325 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/webapi/index.rst0000664000567000056710000000241712701406223022164 0ustar jenkinsjenkins00000000000000========= Web API ========= .. toctree:: :maxdepth: 2 v2 You can get the list of API versions via a request to the endpoint root path. For example:: curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8777 Sample response:: { "versions": { "values": [ { "id": "v2", "links": [ { "href": "http://127.0.0.1:8777/v2", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.telemetry-v2+json" }, { "base": "application/xml", "type": "application/vnd.openstack.telemetry-v2+xml" } ], "status": "stable", "updated": "2013-02-13T00:00:00Z" } ] } } ceilometer-6.0.0/doc/source/webapi/v2.rst0000664000567000056710000006336512701406223021405 0ustar jenkinsjenkins00000000000000.. docbookrestapi ============ V2 Web API ============ Resources ========= .. rest-controller:: ceilometer.api.controllers.v2.resources:ResourcesController :webprefix: /v2/resources .. autotype:: ceilometer.api.controllers.v2.resources.Resource :members: Meters ====== .. rest-controller:: ceilometer.api.controllers.v2.meters:MetersController :webprefix: /v2/meters .. rest-controller:: ceilometer.api.controllers.v2.meters:MeterController :webprefix: /v2/meters .. autotype:: ceilometer.api.controllers.v2.meters.Meter :members: .. autotype:: ceilometer.api.controllers.v2.meters.OldSample :members: Samples and Statistics ====================== .. rest-controller:: ceilometer.api.controllers.v2.samples:SamplesController :webprefix: /v2/samples .. autotype:: ceilometer.api.controllers.v2.samples.Sample :members: .. autotype:: ceilometer.api.controllers.v2.meters.Statistics :members: When a simple statistics request is invoked (using GET /v2/meters//statistics), it will return the standard set of *Statistics*: *avg*, *sum*, *min*, *max*, and *count*. ..
note:: If using Ceilometer data for statistics, it's recommended to use a backend such as Gnocchi_ rather than Ceilometer's interface. Gnocchi is designed specifically for this use case by providing a light-weight, aggregated model. As they manage data differently, the API models returned by Ceilometer and Gnocchi are different. The Gnocchi API can be found here_. .. _Gnocchi: http://docs.openstack.org/developer/gnocchi/ .. _here: http://docs.openstack.org/developer/gnocchi/rest.html Selectable Aggregates +++++++++++++++++++++ The Statistics API has been extended to include the aggregate functions *stddev* and *cardinality*. You can explicitly select these functions or any from the standard set by specifying an aggregate function in the statistics query:: GET /v2/meters//statistics?aggregate.func=&aggregate.param= (where aggregate.param is optional). Duplicate aggregate function and parameter pairs are silently discarded from the statistics query. Partial duplicates, in the sense of the same function but differing parameters, for example:: GET /v2/meters//statistics?aggregate.func=cardinality&aggregate.param=resource_id&aggregate.func=cardinality&aggregate.param=project_id are, on the other hand, both allowed by the API and supported by the storage drivers. See the :ref:`functional-examples` section for more detail. .. note:: Currently only *cardinality* needs aggregate.param to be specified. .. autotype:: ceilometer.api.controllers.v2.meters.Aggregate :members: Capabilities ============ The Capabilities API allows you to directly discover which functions from the V2 API functionality, including the selectable aggregate functions, are supported by the currently configured storage driver. A capabilities query returns a flattened dictionary of properties with associated boolean values - a 'False' or absent value means that the corresponding feature is not available in the backend. .. rest-controller:: ceilometer.api.controllers.v2.capabilities:CapabilitiesController :webprefix: /v2/capabilities .. autotype:: ceilometer.api.controllers.v2.capabilities.Capabilities :members: Events and Traits ================= .. rest-controller:: ceilometer.api.controllers.v2.events:EventTypesController :webprefix: /v2/event_types .. rest-controller:: ceilometer.api.controllers.v2.events:TraitsController :webprefix: /v2/event_types/(event_type)/traits .. rest-controller:: ceilometer.api.controllers.v2.events:EventsController :webprefix: /v2/events .. autotype:: ceilometer.api.controllers.v2.events.Event :members: .. autotype:: ceilometer.api.controllers.v2.events.Trait :members: .. autotype:: ceilometer.api.controllers.v2.events.TraitDescription :members: Filtering Queries ================= Ceilometer's REST API currently supports two types of queries. The Simple Query functionality provides simple filtering on several fields of the *Sample* type. Complex Query provides the possibility to specify queries with logical and comparison operators on the fields of *Sample*. You may also apply filters based on the values of one or more of the *resource_metadata* field, which you can identify by using *metadata.* syntax in either type of query. Note, however, that given the free-form nature of *resource_metadata* field, there is no practical or consistent way to validate the query fields under *metadata* domain like it is done for all other fields. .. 
note:: The API call will return HTTP 200 OK status for both of the following cases: when a query with *metadata.* does not match its value, and when ** itself does not exist in any of the records being queried. Simple Query ++++++++++++ Many of the endpoints above accept a query filter argument, which should be a list of Query data structures. Whatever the endpoint you want to apply a filter on, you always filter on the fields of the *Sample* type (for example, if you apply a filter on a query for statistics, you won't target *duration_start* field of *Statistics*, but *timestamp* field of *Sample*). See :ref:`api-queries` for how to query the API. .. autotype:: ceilometer.api.controllers.v2.base.Query :members: Event Query +++++++++++ Event query is similar to simple query, its type EventQuery is actually a subclass of Query, so EventQuery has every attribute Query has. But there are some differences. If a field is one of the following: event_type, message_id, start_timestamp, end_timestamp, then this field will be applied on event, otherwise it will be treated as trait name and applied on trait. See :ref:`api-queries` for how to query the API. .. autotype:: ceilometer.api.controllers.v2.events.EventQuery :members: Complex Query +++++++++++++ The filter expressions of the Complex Query feature operate on the fields of *Sample*. The following comparison operators are supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical operators can be used: *and* *or* and *not*. The field names are validated against the database models. See :ref:`api-queries` for how to query the API. .. note:: The *not* operator has different meaning in MongoDB and in SQL DB engine. If the *not* operator is applied on a non existent metadata field then the result depends on the DB engine. For example if {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a query the MongoDB will return every Sample object as *not* operator evaluated true for every Sample where the given field does not exists. See more in the MongoDB doc. On the other hand SQL based DB engine will return empty result as the join operation on the metadata table will return zero rows as the on clause of the join which tries to match on the metadata field name is never fulfilled. Complex Query supports defining the list of orderby expressions in the form of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. The number of the returned items can be bounded using the *limit* option. The *filter*, *orderby* and *limit* are all optional fields in a query. .. rest-controller:: ceilometer.api.controllers.v2.query:QuerySamplesController :webprefix: /v2/query/samples .. autotype:: ceilometer.api.controllers.v2.query.ComplexQuery :members: Links ===== .. autotype:: ceilometer.api.controllers.v2.base.Link :members: API and CLI query examples ========================== CLI Queries +++++++++++ Ceilometer CLI Commands:: $ ceilometer --debug --os-username --os-password --os-auth-url http://localhost:5000/v2.0/ --os-tenant-name admin meter-list .. note:: The *username*, *password*, and *tenant-name* options are required to be present in these arguments or specified via environment variables. Note that the in-line arguments will override the environment variables. .. _api-queries: API Queries +++++++++++ Ceilometer API calls: .. note:: To successfully query Ceilometer you must first get a project-specific token from the Keystone service and add it to any API calls that you execute against that project. 
See the `OpenStack credentials documentation `_ for additional details. A simple query to return a list of available meters:: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters" A query to return the list of resources:: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/resources" A query to return the list of samples, limited to a specific meter type:: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/disk.root.size" A query using filters (see: `query filter section `_):: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.delete.start" Additional examples:: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/disk.root.size?q.field=resource_id&q.op=eq&q.value=" or:: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.exists" You can specify multiple filters by using an array of queries (order matters):: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/instance"\ "?q.field=metadata.event_type&q.value=compute.instance.exists"\ "&q.field=timestamp&q.op=gt&q.value=2013-07-03T13:34:17" A query to find the maximum value and standard deviation (*max*, *stddev*) of the CPU utilization for a given instance (identified by *resource_id*):: curl -H 'X-Auth-Token: ' \ "http://localhost:8777/v2/meters/cpu_util/statistics?aggregate.func=max&aggregate.func=stddev"\ "&q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980" .. note:: If any of the requested aggregates are not supported by the storage driver, a HTTP 400 error code will be returned along with an appropriate error message. JSON based example:: curl -X GET -H "X-Auth-Token: " -H "Content-Type: application/json" -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}]}' http://localhost:8777/v2/meters/instance JSON based example with multiple filters:: curl -X GET -H "X-Auth-Token: " -H "Content-Type: application/json" -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}, {"field": "resource_id", "op": "eq", "value": "4da2b992-0dc3-4a7c-a19a-d54bf918de41"}]}' http://localhost:8777/v2/meters/instance .. _functional-examples: Functional examples +++++++++++++++++++ The examples below are meant to help you understand how to query the Ceilometer API to build custom meters report. The query parameters should be encoded using one of the above methods, e.g. as the URL parameters or as JSON encoded data passed to the GET request. Get the list of samples about instances running for June 2013:: GET /v2/meters/instance q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}] Get the list of samples about instances running for June 2013 for a particular project:: GET /v2/meters/instance q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] Now you may want to have statistics on the meters you are targeting. 
Consider the following example where you are getting the list of samples about CPU utilization of a given instance (identified by its *resource_id*) running for June 2013:: GET /v2/meters/cpu_util q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}] You can have statistics on the list of samples requested (*avg*, *sum*, *max*, *min*, *count*) computed on the full duration:: GET /v2/meters/cpu_util/statistics q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}] You may want to aggregate samples over a given period (10 minutes for example) in order to get an array of the statistics computed on smaller durations:: GET /v2/meters/cpu_util/statistics q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}] period: 600 The *period* parameter aggregates by time range. You can also aggregate by field using the *groupby* parameter. Currently, the *user_id*, *resource_id*, *project_id*, and *source* fields are supported. Below is an example that uses a query filter and group by aggregation on *project_id* and *resource_id*:: GET /v2/meters/instance/statistics q: [{"field": "user_id", "op": "eq", "value": "user-2"}, {"field": "source", "op": "eq", "value": "source-1"}] groupby: ["project_id", "resource_id"] The statistics will be returned in a list, and each entry of the list will be labeled with the group name. For the previous example, the first entry might have *project_id* be "project-1" and *resource_id* be "resource-1", the second entry have *project_id* be "project-1" and *resource_id* be "resource-2", and so on. You can request both period and group by aggregation in the same query:: GET /v2/meters/instance/statistics q: [{"field": "source", "op": "eq", "value": "source-1"}] groupby: ["project_id"] period: 7200 Note that period aggregation is applied first, followed by group by aggregation. Order matters because the period aggregation determines the time ranges for the statistics. 
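For reference, the combined period and group-by request can also be issued with curl using the URL-parameter encoding shown earlier in this document. This is an illustrative sketch only: ``<token>`` stands in for a real Keystone token, and the host and port assume a default local deployment::

    # Per-project statistics on the instance meter, restricted to samples
    # from source-1 and aggregated into 7200-second buckets.
    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance/statistics"\
      "?q.field=source&q.op=eq&q.value=source-1"\
      "&groupby=project_id&period=7200"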
Below is a real-life query:: GET /v2/meters/image/statistics groupby: ["project_id", "resource_id"] With the return values:: [{"count": 4, "duration_start": "2013-09-18T19:08:33", "min": 1.0, "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1137.0, "period_start": "2013-09-18T19:08:33", "avg": 1.0, "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", "resource_id": "551f495f-7f49-4624-a34c-c422f2c5f90b"}, "unit": "image"}, {"count": 4, "duration_start": "2013-09-18T19:08:36", "min": 1.0, "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1134.0, "period_start": "2013-09-18T19:08:36", "avg": 1.0, "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", "resource_id": "7c1157ed-cf30-48af-a868-6c7c3ad7b531"}, "unit": "image"}, {"count": 4, "duration_start": "2013-09-18T19:08:34", "min": 1.0, "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1136.0, "period_start": "2013-09-18T19:08:34", "avg": 1.0, "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", "resource_id": "eaed9cf4-fc99-4115-93ae-4a5c37a1a7d7"}, "unit": "image"}] You can request specific aggregate functions as well. For example, if you only want the average CPU utilization, the GET request would look like this:: GET /v2/meters/cpu_util/statistics?aggregate.func=avg Use the same syntax to access the aggregate functions not in the standard set, e.g. *stddev* and *cardinality*. A request for the standard deviation of CPU utilization would take the form:: GET /v2/meters/cpu_util/statistics?aggregate.func=stddev And would give a response such as the example:: [{"aggregate": {"stddev":0.6858829535841072}, "duration_start": "2014-01-30T11:13:23", "duration_end": "2014-01-31T16:07:13", "duration": 104030.0, "period": 0, "period_start": "2014-01-30T11:13:23", "period_end": "2014-01-31T16:07:13", "groupby": null, "unit" : "%"}] The request syntax is similar for *cardinality* but with the aggregate.param option provided. So, for example, if you want to know the number of distinct tenants with images, you would do:: GET /v2/meters/image/statistics?aggregate.func=cardinality &aggregate.param=project_id For a more involved example, consider a requirement for determining, for some tenant, the number of distinct instances (*cardinality*) as well as the total number of instance samples (*count*). You might also want to see this information with 15 minute long intervals. 
Then, using the *period* and *groupby* options, a query would look like the following:: GET /v2/meters/instance/statistics?aggregate.func=cardinality &aggregate.param=resource_id &aggregate.func=count &groupby=project_id&period=900 This would give an example response of the form:: [{"count": 19, "aggregate": {"count": 19.0, "cardinality/resource_id": 3.0}, "duration": 328.478029, "duration_start": "2014-01-31T10:00:41.823919", "duration_end": "2014-01-31T10:06:10.301948", "period": 900, "period_start": "2014-01-31T10:00:00", "period_end": "2014-01-31T10:15:00", "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, "unit": "instance"}, {"count": 22, "aggregate": {"count": 22.0, "cardinality/resource_id": 4.0}, "duration": 808.00384, "duration_start": "2014-01-31T10:15:15", "duration_end": "2014-01-31T10:28:43.003840", "period": 900, "period_start": "2014-01-31T10:15:00", "period_end": "2014-01-31T10:30:00", "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, "unit": "instance"}, {"count": 2, "aggregate": {"count": 2.0, "cardinality/resource_id": 2.0}, "duration": 0.0, "duration_start": "2014-01-31T10:35:15", "duration_end": "2014-01-31T10:35:15", "period": 900, "period_start": "2014-01-31T10:30:00", "period_end": "2014-01-31T10:45:00", "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, "unit": "instance"}] If you want to retrieve all the instances (not the list of samples, but the resources themselves) that have been run during this month for a given project, you should ask the resource endpoint for the list of resources (all types, including storage, images, networking, ...):: GET /v2/resources q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] Then look for resources that have an *instance* meter linked to them. That indicates resources that have been measured as instances. You can then request their samples to get more detailed information, such as their state or their flavor:: GET /v2/meters/instance q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}, {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] This will return a list of samples that have been recorded on this particular resource. You can inspect them to retrieve information, such as the instance state (check the *metadata.vm_state* field) or the instance flavor (check the *metadata.flavor* field). You can request nested metadata fields by using a dot to delimit the fields (e.g. *metadata.weighted_host.host* for the *instance.scheduled* meter). To retrieve only the last 3 samples of a meter, you can pass the *limit* parameter to the query:: GET /v2/meters/instance q: [{"field": "timestamp", "op": "ge", "value": "2013-06-01T00:00:00"}, {"field": "timestamp", "op": "lt", "value": "2013-07-01T00:00:00"}, {"field": "resource_id", "op": "eq", "value": "64da755c-9120-4236-bee1-54acafe24980"}, {"field": "project_id", "op": "eq", "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] limit: 3 This query would only return the last 3 samples.
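For completeness, a simplified form of the *limit* query above (keeping only the resource filter) can be written as a curl call in the same URL-parameter style used earlier. This is a sketch only: ``<token>`` is a placeholder, and the host and port assume a default local deployment::

    # Return only the 3 most recent instance samples for one resource.
    curl -H 'X-Auth-Token: <token>' \
      "http://localhost:8777/v2/meters/instance"\
      "?q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980"\
      "&limit=3"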
Functional example for Complex Query ++++++++++++++++++++++++++++++++++++ This example demonstrates how complex query filter expressions can be generated and sent to the /v2/query/samples endpoint of the Ceilometer API using a POST request. To check for *cpu_util* samples reported between 18:00 and 18:15 or between 18:30 and 18:45 on a particular date (2013-12-01), where the utilization is between 23 and 26 percent, but not exactly 25.12 percent, the following filter expression can be created:: {"and": [{"and": [{"=": {"counter_name": "cpu_util"}}, {">": {"counter_volume": 0.23}}, {"<": {"counter_volume": 0.26}}, {"not": {"=": {"counter_volume": 0.2512}}}]}, {"or": [{"and": [{">": {"timestamp": "2013-12-01T18:00:00"}}, {"<": {"timestamp": "2013-12-01T18:15:00"}}]}, {"and": [{">": {"timestamp": "2013-12-01T18:30:00"}}, {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]} Different sorting criteria can be defined for the query filter; for example, the results can be ordered in ascending order by *counter_volume* and in descending order by *timestamp*. The following order-by expression specifies these criteria:: [{"counter_volume": "ASC"}, {"timestamp": "DESC"}] As the current implementation accepts only string values for the query filter and order-by definitions, the expressions defined above have to be converted to string values. By adding a *limit* to the request, which caps the number of returned samples at four, the query looks like the following:: { "filter" : "{\"and\":[{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.23}}, {\"<\": {\"counter_volume\": 0.26}}, {\"not\": {\"=\": {\"counter_volume\": 0.2512}}}]}, {\"or\": [{\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:00:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:15:00\"}}]}, {\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:30:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:45:00\"}}]}]}]}", "orderby" : "[{\"counter_volume\": \"ASC\"}, {\"timestamp\": \"DESC\"}]", "limit" : 4 } A query request looks like the following with curl:: curl -X POST -H 'X-Auth-Token: ' -H 'Content-Type: application/json' \ -d '' \ http://localhost:8777/v2/query/samples .. _user-defined-data: User-defined data +++++++++++++++++ It is possible to add your own samples (created from data retrieved in any way, for example by monitoring agents on your instances) to Ceilometer, so that they are stored and can be queried. You can even get *Statistics* on your own inserted data. By adding a *Sample* to a *Resource*, you automatically create the corresponding *Meter* if it does not already exist. To achieve this, you have to POST a list of one or more samples in JSON format:: curl -X POST -H 'X-Auth-Token: ' -H 'Content-Type: application/json' \ -d '' \ http://localhost:8777/v2/meters/ The fields *source*, *timestamp*, *project_id* and *user_id* are automatically added if not present in the samples. The *message_id* field is ignored if present; an internal value is set instead. By default, samples posted via the API will be placed on the notification bus and processed by the notification agent. To avoid re-queuing the data, samples posted via the API can be stored verbatim in the storage backend by specifying a boolean flag 'direct' in the request URL, like this:: POST /v2/meters/ram_util?direct=True Samples posted this way will bypass pipeline processing.
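As an illustrative sketch of what this looks like on the wire (``<token>`` and ``<json sample list>`` are placeholders; a suitable sample body is shown in the next example), the direct variant of the POST differs from the regular one only in its query string::

    # Write samples straight to the storage backend, bypassing the
    # notification bus and pipeline processing.
    curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \
      -d '<json sample list>' \
      "http://localhost:8777/v2/meters/ram_util?direct=True"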
Here is an example showing how to add a sample for a *ram_util* meter (already existing or not):: POST /v2/meters/ram_util body: [ { "counter_name": "ram_util", "user_id": "4790fbafad2e44dab37b1d7bfc36299b", "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b", "resource_metadata": { "display_name": "my_instance", "my_custom_metadata_1": "value1", "my_custom_metadata_2": "value2" }, "counter_unit": "%", "counter_volume": 8.57762938230384, "project_id": "97f9a6aaa9d842fcab73797d3abb2f53", "counter_type": "gauge" } ] You get back the same list containing your example completed with the missing fields : *source* and *timestamp* in this case. ceilometer-6.0.0/doc/source/conf.py0000664000567000056710000002275712701406223020364 0ustar jenkinsjenkins00000000000000# # Ceilometer documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # This is required for ReadTheDocs.org, but isn't a bad idea anyway. os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.autohttp.flask', 'wsmeext.sphinxext', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.pecanwsme.rest', 'oslosphinx', ] wsme_protocols = ['restjson', 'restxml'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer' copyright = u'2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "nosidebar": "false" } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'Ceilometerdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Ceilometer.tex', u'Ceilometer Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometer', u'Ceilometer Documentation', [u'OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Ceilometer', u'Ceilometer Documentation', u'OpenStack', 'Ceilometer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Ceilometer' epub_author = u'OpenStack' epub_publisher = u'OpenStack' epub_copyright = u'2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. 
#epub_tocdup = True
ceilometer-6.0.0/doc/source/ceilo-arch.png0000664000567000056710000027047312701406223021601 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted: ceilo-arch.png, Ceilometer architecture diagram]
[binary data omitted]
For example, suppose that ``ceilometer-polling`` has process id ``8675``,
and was run with ``2>/var/log/ceilometer/ceilometer-polling.log``.
Then, ``kill -USR1 8675`` will trigger the Guru Meditation report to be
printed to ``/var/log/ceilometer/ceilometer-polling.log``.

Structure of a GMR
------------------

The *GMR* is designed to be extensible; any particular executable may add
its own sections. However, the base *GMR* consists of several sections:

Package
  Shows information about the package to which this process belongs,
  including version information.

Threads
  Shows stack traces and thread ids for each of the threads within this
  process.

Green Threads
  Shows stack traces for each of the green threads within this process
  (green threads don't have thread ids).

Configuration
  Lists all the configuration options currently accessible via the CONF
  object for the current process.

Adding Support for GMRs to New Executables
------------------------------------------

Adding support for a *GMR* to a given executable is fairly easy.

First import the module (residing in the oslo.reports library), as well
as the Ceilometer version module:

.. code-block:: python

   from oslo_reports import guru_meditation_report as gmr
   from ceilometer import version

Then, register any additional sections (optional):

.. code-block:: python

   gmr.TextGuruMeditation.register_section('Some Special Section',
                                           some_section_generator)

Finally (under main), before running the "main loop" of the executable
(usually ``service.server(server)`` or something similar), register the
*GMR* hook:

.. code-block:: python

   gmr.TextGuruMeditation.setup_autorun(version)

Extending the GMR
-----------------

As mentioned above, additional sections can be added to the GMR for a
particular executable.
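For instance, here is a minimal sketch of wiring in a custom section
next to the autorun hook. The section name and generator below are
illustrative assumptions rather than part of the Ceilometer source, and
the sketch assumes the text report embeds the ``str()`` form of whatever
the generator returns; richer sections can return oslo.reports models
instead:

.. code-block:: python

   from oslo_reports import guru_meditation_report as gmr

   from ceilometer import version


   def pipeline_section_generator():
       # Hypothetical generator: the returned value is rendered under
       # the 'Pipeline State' header when a report is requested.
       return 'pipelines loaded: 2'

   # Register the extra section, then arm the USR1 signal handler so a
   # report (including the new section) can be dumped at runtime.
   gmr.TextGuruMeditation.register_section('Pipeline State',
                                           pipeline_section_generator)
   gmr.TextGuruMeditation.setup_autorun(version)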
For more information, see the inline documentation about oslo.reports: `oslo.reports`_
ceilometer-6.0.0/doc/source/4-Transformer.png0000664000567000056710000012235612701406223022232 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
ceilometer-6.0.0/doc/source/3-Pipeline.png0000664000567000056710000013312612701406223021471 0ustar jenkinsjenkins00000000000000[binary PNG image data omitted]
is available in the developer documentation, and it is also relatively easy to add your own (and eventually contribute it). Ceilometer is part of OpenStack, but is not tied to OpenStack's definition of "users" and "tenants." The "source" field of each sample refers to the authority defining the user and tenant associated with the sample. Deployers can define custom sources through a configuration file, and then create agents to collect samples for new meters using those sources. This means that you can collect data for applications running on top of OpenStack, such as a PaaS or SaaS layer, and use the same tools for metering your entire cloud. Moreover, end users can also :ref:`send their own application centric data` into the database through the REST API for a variety of use cases. .. _send their own application centric data: ./webapi/v2.html#user-defined-data ceilometer-6.0.0/doc/source/releasenotes/0000775000567000056710000000000012701406364021547 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/releasenotes/index.rst0000664000567000056710000000246012701406223023404 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Release Notes ============================ .. toctree:: :hidden: folsom * :ref:`folsom` * `Havana`_ * `Icehouse`_ * `Juno`_ * `Kilo`_ * `Liberty`_ .. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29 .. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29
.. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29 .. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29 .. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 ceilometer-6.0.0/doc/source/releasenotes/folsom.rst0000664000567000056710000000503412701406223023574 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _folsom: ==================== Folsom ==================== This is the first release (Version 0.1) of Ceilometer. Please take all appropriate caution in using it, as it is a technology preview at this time. Version of OpenStack It is currently tested to work with OpenStack 2012.2 Folsom. Due to its use of openstack-common, and the modifications that were made in terms of notifications to many other components (glance, cinder, quantum), it will not easily work with any prior version of OpenStack. Components Currently covered components are: Nova, Nova-network, Glance, Cinder and Quantum. Notably, there is no support yet for Swift, and it was decided not to support nova-volume in favor of Cinder. A detailed list of meters covered per component can be found in :ref:`measurements`. Nova with libvirt only Most of the Nova meters will only work with libvirt-fronted hypervisors at the moment, and our test coverage was mostly done on KVM. Contributors are welcome to implement other virtualization backends' meters. Quantum delete events Quantum delete notifications do not include the same metadata as the other messages, so we ignore them for now. This isn't ideal, since it may mean we miss charging for some amount of time, but it is better than throwing away the existing metadata for a resource when it is deleted. Database backend The only tested and complete database backend is currently MongoDB; the SQLAlchemy one is still a work in progress. Installation The current best source of information on how to deploy this project is the devstack implementation, but feel free to come to #openstack-metering on freenode for more info. Volume of data Please note that metering can generate lots of data very quickly. Have a look at the following spreadsheet to evaluate what you will end up with. http://wiki.openstack.org/EfficientMetering#Volume_of_data ceilometer-6.0.0/doc/source/overview.rst0000664000567000056710000000377712701406223021462 0ustar jenkinsjenkins00000000000000======== Overview ======== Objectives ========== The Ceilometer project was started in 2012 with one simple goal in mind: to provide an infrastructure to collect any information needed regarding OpenStack projects. It was designed so that rating engines could use this single source to transform events into billable items which we label as "metering".
As the project started to come to life, collecting an `increasing number of meters`_ across multiple projects, the OpenStack community started to realize that a secondary goal could be added to Ceilometer: become a standard way to collect meters, regardless of the purpose of the collection. For example, Ceilometer can now publish information for monitoring, debugging and graphing tools in addition or in parallel to the metering backend. We labelled this effort as "multi-publisher". .. _increasing number of meters: http://docs.openstack.org/developer/ceilometer/measurements.html Metering ======== If you divide a billing process into a 3-step process, as is commonly done in the telco industry, the steps are: 1. :term:`Metering` 2. :term:`Rating` 3. :term:`Billing` Ceilometer's initial goal was, and still is, strictly limited to step one. This is a choice made from the beginning not to go into rating or billing, as the variety of possibilities seemed too large for the project to ever deliver a solution that would fit everyone's needs, from private to public clouds. This means that if you are looking at this project to solve your billing needs, this is the right way to go, but certainly not the end of the road for you. Once Ceilometer is in place on your OpenStack deployment, you will still have several things to do before you can produce a bill for your customers. One of your first tasks could be: finding the right queries within the Ceilometer API to extract the information you need for your very own rating engine, as sketched below.
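As a concrete (and purely illustrative) example of such a query, the sketch below asks the v2 REST API for statistics on a single meter over a period; the endpoint, meter name, date and token are placeholders rather than values this document prescribes::

    import requests

    # Placeholders: substitute your API endpoint, a valid keystone token,
    # a meter that exists in your deployment, and your billing period.
    CEILOMETER = 'http://controller:8777'
    TOKEN = '<keystone token>'

    resp = requests.get(CEILOMETER + '/v2/meters/cpu_util/statistics',
                        headers={'X-Auth-Token': TOKEN},
                        params={'q.field': 'timestamp',
                                'q.op': 'ge',
                                'q.value': '2016-03-01T00:00:00'})
    for stat in resp.json():
        print(stat['avg'], stat['duration'])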
.. seealso:: * http://wiki.openstack.org/EfficientMetering/ArchitectureProposalV1 * http://wiki.openstack.org/EfficientMetering#Architecture ceilometer-6.0.0/doc/source/api/0000775000567000056710000000000012701406364017627 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/api/index.rst0000664000567000056710000000014712701406223021464 0ustar jenkinsjenkins00000000000000=================== Source Code Index =================== .. toctree:: :maxdepth: 1 autoindex ceilometer-6.0.0/doc/source/install/0000775000567000056710000000000012701406364020524 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/doc/source/install/index.rst0000664000567000056710000000145512701406223022364 0ustar jenkinsjenkins00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _install: ======================= Installing Ceilometer ======================= .. toctree:: :maxdepth: 2 dbreco development manual upgrade mod_wsgi ceilometer-6.0.0/doc/source/install/manual.rst0000664000567000056710000005416012701406224022534 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _installing_manually: ===================== Installing Manually ===================== Storage Backend Installation ============================ This step is a prerequisite for the collector, notification agent and API services. You may use one of the listed database backends below to store Ceilometer data. .. note:: Please note that MongoDB requires pymongo_ to be installed on the system. The required minimum version of pymongo is 2.4. .. MongoDB ------- The recommended Ceilometer storage backend is `MongoDB`. Follow the instructions to install the MongoDB_ package for your operating system, then start the service. The required minimum version of MongoDB is 2.4. To use MongoDB as the storage backend, change the 'database' section in ceilometer.conf as follows:: [database] connection = mongodb://username:password@host:27017/ceilometer SQLalchemy-supported DBs ------------------------ You may alternatively use `MySQL` (or any other SQLAlchemy-supported DB like `PostgreSQL`). In case of SQL-based database backends, you need to create a `ceilometer` database first and then initialise it by running:: ceilometer-dbsync To use MySQL as the storage backend, change the 'database' section in ceilometer.conf as follows:: [database] connection = mysql+pymysql://username:password@host/ceilometer?charset=utf8 HBase ----- The HBase backend is implemented on top of the HBase Thrift interface, so it is mandatory to have the HBase Thrift server installed and running. To start the Thrift server, please run the following command:: ${HBASE_HOME}/bin/hbase thrift start The implementation uses `HappyBase`_, which is a wrapper library used to interact with HBase via the Thrift protocol. You can verify the Thrift connection by running a quick test from a client::

    import happybase

    conn = happybase.Connection(host='hbase-thrift-server', port=9090,
                                table_prefix=None,
                                table_prefix_separator='_')
    print(conn.tables())  # this returns a list of HBase tables in your HBase server

.. note:: HappyBase version 0.5 or greater is required. Additionally, version 0.7 is not currently supported. .. In case of HBase, the needed database tables (`project`, `user`, `resource`, `meter`) should be created manually with the `f` column family for each one. To use HBase as the storage backend, change the 'database' section in ceilometer.conf as follows:: [database] connection = hbase://hbase-thrift-host:9090 It is possible to customize happybase's `table_prefix` and `table_prefix_separator` via the query string. By default `table_prefix` is not set and `table_prefix_separator` is '_'. When `table_prefix` is not specified, `table_prefix_separator` is not taken into account. E.g. the resource table in the default case will be 'resource', while with `table_prefix` set to 'ceilo' and `table_prefix_separator` to '.' the resulting table will be 'ceilo.resource'. For this second case, the database connection configuration is:: [database] connection = hbase://hbase-thrift-host:9090?table_prefix=ceilo&table_prefix_separator=.
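As with the HBase check above, you can verify a MongoDB connection from a client before pointing Ceilometer at it. A minimal sketch, assuming pymongo_ is installed and reusing the placeholder credentials from the connection URL above::

    import pymongo

    # Placeholders: use the same credentials and host as the [database]
    # connection URL in ceilometer.conf.
    client = pymongo.MongoClient(
        'mongodb://username:password@host:27017/ceilometer')
    # Lists the collections Ceilometer has created, once data is flowing.
    print(client.ceilometer.collection_names())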
.. _HappyBase: http://happybase.readthedocs.org/en/latest/index.html# .. _MongoDB: http://www.mongodb.org/ .. _pymongo: https://pypi.python.org/pypi/pymongo/ Installing the notification agent ====================================== .. index:: double: installing; agent-notification 1. If you want to be able to retrieve image samples, you need to instruct Glance to send notifications to the bus by changing ``notifier_strategy`` to ``rabbit`` in ``glance-api.conf`` and restarting the service. 2. If you want to be able to retrieve volume samples, you need to instruct Cinder to send notifications to the bus by changing ``notification_driver`` to ``messagingv2`` and ``control_exchange`` to ``cinder``, before restarting the service. 3. If you want to be able to retrieve instance samples, you need to instruct Nova to send notifications to the bus by setting these values:: # nova-compute configuration for ceilometer instance_usage_audit=True instance_usage_audit_period=hour notify_on_state_change=vm_and_task_state notification_driver=messagingv2 4. In order to retrieve object store statistics, ceilometer needs access to swift with the ``ResellerAdmin`` role. You should give this role to your ``os_username`` user for tenant ``os_tenant_name``::

    $ keystone role-create --name=ResellerAdmin
    +----------+----------------------------------+
    | Property |              Value               |
    +----------+----------------------------------+
    | id       | 462fa46c13fd4798a95a3bfbe27b5e54 |
    | name     | ResellerAdmin                    |
    +----------+----------------------------------+
    $ keystone user-role-add --tenant_id $SERVICE_TENANT \
                             --user_id $CEILOMETER_USER \
                             --role_id 462fa46c13fd4798a95a3bfbe27b5e54

You'll also need to add the Ceilometer middleware to Swift to account for incoming and outgoing traffic, by adding these lines to ``/etc/swift/proxy-server.conf``:: [filter:ceilometer] use = egg:ceilometer#swift And adding ``ceilometer`` in the ``pipeline`` of that same file, right before ``proxy-server``. Additionally, if you want to store extra metadata from headers, you need to set ``metadata_headers`` so it would look like:: [filter:ceilometer] use = egg:ceilometer#swift metadata_headers = X-FOO, X-BAR .. note:: Please make sure that ceilometer's logging directory (if it is configured) is readable and writable by the user swift runs as. 5. Clone the ceilometer git repository to the management server:: $ cd /opt/stack $ git clone https://git.openstack.org/openstack/ceilometer.git 6. As a user with ``root`` permissions or ``sudo`` privileges, run the ceilometer installer:: $ cd ceilometer $ sudo python setup.py install 7. Copy the sample configuration files from the source tree to their final location. :: $ mkdir -p /etc/ceilometer $ cp etc/ceilometer/*.json /etc/ceilometer $ cp etc/ceilometer/*.yaml /etc/ceilometer $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf 8. Edit ``/etc/ceilometer/ceilometer.conf`` 1. Configure messaging Set the messaging-related options correctly so ceilometer's daemons can communicate with each other and receive notifications from the other projects. In particular, look for the ``*_control_exchange`` options and make sure the names are correct. If you did not change the ``control_exchange`` settings for the other components, the defaults should be correct. .. note:: Ceilometer makes extensive use of the messaging bus, but has not yet been tested with ZeroMQ. We recommend using Rabbit for now. 2. Set the ``telemetry_secret`` value. Set the ``telemetry_secret`` value to a large, random value. Use the same value in all ceilometer configuration files, on all nodes, so that messages passing between the nodes can be validated.
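One simple way to generate such a value (any sufficiently long random string will do; this is just one option)::

    import binascii
    import os

    # 32 random bytes, hex-encoded, make a reasonable telemetry_secret.
    print(binascii.hexlify(os.urandom(32)).decode())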
Refer to :doc:`/configuration` for details about any other options you might want to modify before starting the service. 9. Start the notification daemon. :: $ ceilometer-agent-notification .. note:: The default development configuration of the notification agent logs to stderr, so you may want to run this step using a screen session or other tool for maintaining a long-running program in the background. Installing the collector ======================== .. index:: double: installing; collector .. _storage_backends: 1. Clone the ceilometer git repository to the management server:: $ cd /opt/stack $ git clone https://git.openstack.org/openstack/ceilometer.git 2. As a user with ``root`` permissions or ``sudo`` privileges, run the ceilometer installer:: $ cd ceilometer $ sudo python setup.py install 3. Copy the sample configuration files from the source tree to their final location. :: $ mkdir -p /etc/ceilometer $ cp etc/ceilometer/*.json /etc/ceilometer $ cp etc/ceilometer/*.yaml /etc/ceilometer $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf 4. Edit ``/etc/ceilometer/ceilometer.conf`` 1. Configure messaging Set the messaging-related options correctly so ceilometer's daemons can communicate with each other and receive notifications from the other projects. In particular, look for the ``*_control_exchange`` options and make sure the names are correct. If you did not change the ``control_exchange`` settings for the other components, the defaults should be correct. .. note:: Ceilometer makes extensive use of the messaging bus, but has not yet been tested with ZeroMQ. We recommend using Rabbit for now. 2. Set the ``telemetry_secret`` value. Set the ``telemetry_secret`` value to a large, random value. Use the same value in all ceilometer configuration files, on all nodes, so that messages passing between the nodes can be validated. Refer to :doc:`/configuration` for details about any other options you might want to modify before starting the service. 5. Start the collector. :: $ ceilometer-collector .. note:: The default development configuration of the collector logs to stderr, so you may want to run this step using a screen session or other tool for maintaining a long-running program in the background. Installing the Polling Agent ============================ .. index:: double: installing; agent .. note:: The polling agent needs to be able to talk to Keystone and any of the services being polled for updates. It also needs to run on your compute nodes to poll instances. 1. Clone the ceilometer git repository to the server:: $ cd /opt/stack $ git clone https://git.openstack.org/openstack/ceilometer.git 2. As a user with ``root`` permissions or ``sudo`` privileges, run the ceilometer installer:: $ cd ceilometer $ sudo python setup.py install 3. Copy the sample configuration files from the source tree to their final location. :: $ mkdir -p /etc/ceilometer $ cp etc/ceilometer/*.json /etc/ceilometer $ cp etc/ceilometer/*.yaml /etc/ceilometer $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf 4. Edit ``/etc/ceilometer/ceilometer.conf`` Set the messaging-related options correctly so ceilometer's daemons can communicate with each other and receive notifications from the other projects. In particular, look for the ``*_control_exchange`` options and make sure the names are correct.
If you did not change the ``control_exchange`` settings for the other components, the defaults should be correct. .. note:: Ceilometer makes extensive use of the messaging bus, but has not yet been tested with ZeroMQ. We recommend using Rabbit for now. Refer to :doc:`/configuration` for details about any other options you might want to modify before starting the service. 5. Start the agent :: $ ceilometer-polling 6. By default, the polling agent polls the `compute` and `central` namespaces. You can specify which namespaces to poll in the `ceilometer.conf` configuration file or on the command line:: $ ceilometer-polling --polling-namespaces central,ipmi Installing the API Server ========================= .. index:: double: installing; API .. note:: The API server needs to be able to talk to keystone and ceilometer's database. 1. Clone the ceilometer git repository to the server:: $ cd /opt/stack $ git clone https://git.openstack.org/openstack/ceilometer.git 2. As a user with ``root`` permissions or ``sudo`` privileges, run the ceilometer installer:: $ cd ceilometer $ sudo python setup.py install 3. Copy the sample configuration files from the source tree to their final location. :: $ mkdir -p /etc/ceilometer $ cp etc/ceilometer/api_paste.ini /etc/ceilometer $ cp etc/ceilometer/*.json /etc/ceilometer $ cp etc/ceilometer/*.yaml /etc/ceilometer $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf 4. Edit ``/etc/ceilometer/ceilometer.conf`` 1. Configure messaging Set the messaging-related options correctly so ceilometer's daemons can communicate with each other and receive notifications from the other projects. In particular, look for the ``*_control_exchange`` options and make sure the names are correct. If you did not change the ``control_exchange`` settings for the other components, the defaults should be correct. .. note:: Ceilometer makes extensive use of the messaging bus, but has not yet been tested with ZeroMQ. We recommend using Rabbit for now. Refer to :doc:`/configuration` for details about any other options you might want to modify before starting the service. 5. (Optional) As of the Juno release, Ceilometer utilises Paste Deploy to manage WSGI applications. Ceilometer uses keystonemiddleware by default, but additional middleware and applications can be configured in api_paste.ini. For examples on how to use Paste Deploy, refer to this documentation_. .. _documentation: http://pythonpaste.org/deploy/ 6. Choose and start the API server. Ceilometer includes the ``ceilometer-api`` command. This can be used to run the API server. For smaller or proof-of-concept installations this is a reasonable choice. For larger installations it is strongly recommended to install the API server in a WSGI host such as mod_wsgi (see :doc:`mod_wsgi`). Doing so will provide better performance and more options for making adjustments specific to the installation environment. If you are using the ``ceilometer-api`` command, it can be started as:: $ ceilometer-api .. note:: The development version of the API server logs to stderr, so you may want to run this step using a screen session or other tool for maintaining a long-running program in the background. Configuring keystone to work with API ===================================== .. index:: double: installing; configure keystone .. note:: The API server needs to be able to talk to keystone to authenticate.
1. Create a service for ceilometer in keystone::

    $ keystone service-create --name=ceilometer \
                              --type=metering \
                              --description="Ceilometer Service"

2. Create an endpoint in keystone for ceilometer::

    $ keystone endpoint-create --region RegionOne \
                               --service_id $CEILOMETER_SERVICE \
                               --publicurl "http://$SERVICE_HOST:8777/" \
                               --adminurl "http://$SERVICE_HOST:8777/" \
                               --internalurl "http://$SERVICE_HOST:8777/"

.. note:: CEILOMETER_SERVICE is the id of the service created by the first command and SERVICE_HOST is the host where the Ceilometer API is running. The default port value for the ceilometer API is 8777. If the port value has been customized, adjust accordingly.
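With the service and endpoint in place, you can confirm that the API answers authenticated requests by listing the known meters. A minimal sketch; the host, port and token are placeholders, not values this guide prescribes::

    import requests

    # Placeholders: substitute your API host/port and a valid keystone token.
    resp = requests.get('http://SERVICE_HOST:8777/v2/meters',
                        headers={'X-Auth-Token': '<keystone token>'})
    print(resp.status_code)                  # expect 200 once auth works
    print([m['name'] for m in resp.json()])  # the meters known so far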
Configuring Heat to send notifications ====================================== Configure the driver in ``heat.conf`` :: notification_driver=messagingv2 Configuring Sahara to send notifications ======================================== Configure the driver in ``sahara.conf`` :: enable_notifications=true notification_driver=messagingv2 You also need to configure the messaging-related options correctly, as described above for the other parts of the installation guide. Refer to :doc:`/configuration` for details about any other options you might want to modify before starting the service. Configuring MagnetoDB to send notifications =========================================== Configure the driver in ``magnetodb-async-task-executor.conf`` :: notification_driver=messagingv2 You will also need to restart the magnetodb-async-task-executor service (if it is already running) after changing the above configuration file. Notification queues ======================== .. index:: double: installing; notifications queues; multiple topics By default, Ceilometer consumes notifications on the messaging bus sent to **notification_topics** by using a queue/pool name that is identical to the topic name. You shouldn't have different applications consuming messages from this queue. If you want to also consume the topic notifications with a system other than Ceilometer, you should configure a separate queue that listens for the same messages. Ceilometer allows multiple topics to be configured so that the polling agent can send the same notification messages to other queues. Notification agents also use **notification_topics** to configure which queues to listen on. If you use multiple topics, you should configure the notification agent and the polling agent separately, otherwise Ceilometer collects duplicate samples. By default, the ceilometer.conf file is as follows:: [DEFAULT] notification_topics = notifications To use multiple topics, you should give the ceilometer-agent-notification and ceilometer-polling services different ceilometer.conf files. The Ceilometer configuration file ceilometer.conf is normally located in the /etc/ceilometer directory. Make changes according to your requirements, which may look like the following:: For the notification agent using ceilometer-notification.conf, settings like:: [DEFAULT] notification_topics = notifications,xxx For the polling agent using ceilometer-polling.conf, settings like:: [DEFAULT] notification_topics = notifications,foo .. note:: notification_topics in ceilometer-notification.conf should share only one topic with ceilometer-polling.conf. Doing this, it's easy to receive data from multiple internal and external services. Using multiple dispatchers ================================ .. index:: double: installing; multiple dispatchers The Ceilometer collector allows multiple dispatchers to be configured so that data can be easily sent to multiple internal and external systems. Dispatchers are divided between ``event_dispatchers`` and ``meter_dispatchers``, which can each be provided with their own set of receiving systems. .. note:: In Liberty and prior the configuration option for all data was ``dispatcher``, but this was changed for the Mitaka release to break out separate destination systems by type of data. By default, Ceilometer only saves event and meter data in a database. If you want Ceilometer to send data to other systems, instead of or in addition to the Ceilometer database, multiple dispatchers can be enabled by modifying the Ceilometer configuration file. Ceilometer currently ships multiple dispatchers: the ``database``, ``file``, ``http`` and ``gnocchi`` dispatchers. As the names imply, the database dispatcher sends metering data to a database, the file dispatcher logs meters into a file, the http dispatcher posts the meters to an HTTP target, and the gnocchi dispatcher posts the meters to a Gnocchi_ backend. Each dispatcher can have its own configuration parameters. Please see the available configuration parameters at the beginning of each dispatcher file. .. _Gnocchi: http://gnocchi.readthedocs.org/en/latest/basic.html To check if any of the dispatchers is available in your system, you can inspect the Ceilometer egg entry_points.txt file; you should normally see text like the following:: [ceilometer.dispatcher] database = ceilometer.dispatcher.database:DatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher To configure one or multiple dispatchers for Ceilometer, find the Ceilometer configuration file ceilometer.conf, which is normally located in the /etc/ceilometer directory, and make changes accordingly. Your configuration file can be in a different directory. To use multiple dispatchers on a Ceilometer collector service, add multiple dispatcher lines in the ceilometer.conf file like the following:: [DEFAULT] meter_dispatchers=database meter_dispatchers=file If there is no dispatcher present, the database dispatcher is used as the default. If, in some cases such as traffic tests, no dispatcher is needed, the line can be configured without a dispatcher, like the following:: event_dispatchers= With the above configuration, no event dispatcher is used by the Ceilometer collector service; all event data received by the Ceilometer collector will be dropped. For the Gnocchi dispatcher, the following configuration settings should be added:: [DEFAULT] meter_dispatchers = gnocchi [dispatcher_gnocchi] archive_policy = low The value specified for ``archive_policy`` should correspond to the name of an ``archive_policy`` configured within Gnocchi. For the Gnocchi dispatcher backed by Swift storage, the following additional configuration settings should be added:: [dispatcher_gnocchi] filter_project = gnocchi_swift filter_service_activity = True .. note:: If the gnocchi dispatcher is enabled, Ceilometer API calls will return a 410 with an empty result. The Gnocchi API should be used instead to access the data.
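As an alternative to reading entry_points.txt by hand, the same registry can be inspected programmatically. A small sketch, assuming ceilometer is installed in the active Python environment::

    import pkg_resources

    # Enumerate the dispatchers registered under the
    # ceilometer.dispatcher entry-point namespace.
    for ep in pkg_resources.iter_entry_points('ceilometer.dispatcher'):
        print(ep.name, '->', ep.module_name)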
ceilometer-6.0.0/doc/source/install/dbreco.rst0000664000567000056710000000566312701406223022516 0ustar jenkinsjenkins00000000000000.. Copyright 2013 Nicolas Barcet for eNovance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _choosing_db_backend: ===================================== Choosing a database backend - Legacy ===================================== .. note: Ceilometer's existing database capability is intended for post-processing and auditing purposes where responsiveness is not a requirement. It captures the full fidelity of each datapoint and thus is not designed for low-latency use cases. For more responsive use cases, it's recommended to store data in an alternative source such as Gnocchi_ Selecting a database backend for Ceilometer should not be done lightly for numerous reasons: 1. Not all backend drivers are equally implemented and tested. To help you make your choice, the table below will give you some idea of the status of each of the drivers available in trunk. Note that we do welcome patches to improve completeness and quality of drivers. 2. It may not be a good idea to use the same host as another database, as Ceilometer can generate a LOT OF WRITES. For this reason it is generally recommended, if the deployment is targeting production, to use a dedicated host, or at least a VM which will be migratable to another physical host if needed. The following spreadsheet can help you get an idea of the volumes that ceilometer can generate: `Google spreadsheet `_ 3. If you are relying on this backend to bill customers, you will note that your capacity to generate revenue is very much linked to its reliability, which seems to be a factor dear to many managers. The following table indicates the status of each database driver:

================== ============================= =================== ======
Driver             API querying                  API statistics      Alarms
================== ============================= =================== ======
MongoDB            Yes                           Yes                 Yes
MySQL              Yes                           Yes                 Yes
PostgreSQL         Yes                           Yes                 Yes
HBase              Yes                           Yes, except groupby Yes
================== ============================= =================== ======

.. _Gnocchi: http://gnocchi.xyz
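To put rough numbers on the write volume mentioned in point 2 above, a back-of-envelope estimate along the following lines can help; the figures are purely illustrative and should be replaced with your own deployment's values::

    # Purely illustrative figures; substitute your own.
    resources = 1000           # instances and other polled resources
    meters_per_resource = 10   # meters collected per resource
    interval = 600             # polling interval, in seconds

    samples_per_day = resources * meters_per_resource * (86400 // interval)
    print(samples_per_day)     # 1,440,000 samples per day in this example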
ceilometer-6.0.0/doc/source/install/mod_wsgi.rst0000664000567000056710000000436612701406223023071 0ustar jenkinsjenkins00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Installing the API behind mod_wsgi =================================== Ceilometer comes with a few example files for configuring the API service to run behind Apache with ``mod_wsgi``. app.wsgi ======== The file ``ceilometer/api/app.wsgi`` sets up the V2 API WSGI application. The file is installed with the rest of the ceilometer application code, and should not need to be modified. etc/apache2/ceilometer ====================== The ``etc/apache2/ceilometer`` file contains example settings that work with a copy of ceilometer installed via devstack. .. literalinclude:: ../../../etc/apache2/ceilometer 1. On deb-based systems copy or symlink the file to ``/etc/apache2/sites-available``. For rpm-based systems the file will go in ``/etc/httpd/conf.d``. 2. Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to an appropriate user on your server. In many installations ``ceilometer`` will be correct. 3. Enable the ceilometer site. On deb-based systems:: $ a2ensite ceilometer $ service apache2 reload On rpm-based systems:: $ service httpd reload Limitation ========== As Ceilometer is using Pecan and Pecan's DebugMiddleware doesn't support multiple processes, there is no way to set debug mode in the multiprocessing case. To allow multiple processes, the DebugMiddleware may be turned off by setting ``pecan_debug`` to ``False`` in the ``api`` section of ``ceilometer.conf``. For other WSGI setups you can refer to the `pecan deployment`_ documentation. .. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html ceilometer-6.0.0/doc/source/install/upgrade.rst0000664000567000056710000001000412701406223022672 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _upgrade: ========== Upgrading ========== Ceilometer's services support both full and partial (rolling) upgrades. The required steps for each process are described below. Full upgrades ============= The following describes how to upgrade your entire Ceilometer environment in one pass. .. _full upgrade path: 1. Upgrade the database (if applicable) Run ceilometer-dbsync to upgrade the database if using one of Ceilometer's databases (see :ref:`choosing_db_backend`). The database does not need to be taken offline as no data is modified or deleted. Ideally this should be done during a period of low activity. Best practices should still be followed (i.e. back up your data). If not using a Ceilometer database, you should consult the documentation of that storage beforehand. 2. Upgrade the collector service(s) Shut down all collector services. The new collector, which knows how to interpret the new payload, can then be started. It will disregard any historical attributes and can continue to process older data from the agents. You may restart as many new collectors as required. 3. Upgrade the notification agent(s) The notification agent can then be taken offline and upgraded with the same conditions as the collector service. 4. Upgrade the polling agent(s) In this path, you'll want to take down agents on all hosts before starting. After starting the first agent, you should verify that data is again being polled. Additional agents can be added to support coordination if enabled.
.. note:: The API service can be taken offline and upgraded at any point in the process (if applicable). Partial upgrades ================ The following describes how to upgrade parts of your Ceilometer environment gradually. The ultimate goal is to have all services upgraded to the new version in time. 1. Upgrade the database (if applicable) Upgrading the database here is the same as the `full upgrade path`_. 2. Upgrade the collector service(s) The new collector services can be started alongside the old collectors. Collectors old and new will disregard any new or historical attributes. 3. Upgrade the notification agent(s) The new notification agent can be started alongside the old agent if no workload_partitioning is enabled OR if it has the same pipeline configuration. If the pipeline configuration is changed, the old agents must be loaded with the same pipeline configuration first to ensure the notification agents all work against the same pipeline sets. 4. Upgrade the polling agent(s) The new polling agent can be started alongside the old agent only if no new pollsters were added. If not, new polling agents must start only in their own partitioning group and poll only the new pollsters. After all old agents are upgraded, the polling agents can be changed to poll both the new pollsters AND the old ones. 5. Upgrade the API service(s) API management is handled by WSGI, so there is only ever one version of the API service running. .. note:: Upgrade ordering does not matter in the partial upgrade path. The only requirement is that the database be upgraded first. It is advisable to upgrade following the same ordering as currently described: database, collector, notification agent, polling agent, api. Developer notes =============== When updating data models in the database or IPC, we need to adhere to a single mantra: 'always add, never delete or modify.' ceilometer-6.0.0/doc/source/install/development.rst0000664000567000056710000000544212701406223023577 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================== Installing development sandbox =============================== Ceilometer has several daemons. The basic ones are: the :term:`polling agent` running either on the Nova compute node(s) or on the central management node(s), and the :term:`collector` and :term:`notification agent` running on the cloud's management node(s). In a development environment created by devstack_, these services are typically running on the same server. They do not have to be, though, so some of the instructions below are duplicated. Skip the steps you have already done. .. note:: In fact, ceilometer previously had separate compute and central agents, and it is their behavior that is implemented in devstack_ right now, not the single-agent variant. For now there are deprecated cmd scripts emulating the old compute/central behavior using the namespaces option passed to the polling agent; these will be maintained for a transitional period.
Configuring devstack ==================== .. index:: double: installing; devstack 1. Download devstack_. 2. Create a ``local.conf`` file as input to devstack. 3. Ceilometer makes extensive use of the messaging bus, but has not yet been tested with ZeroMQ. We recommend using Rabbit for now. By default, RabbitMQ will be used by devstack. 4. The ceilometer services are not enabled by default, so they must be enabled in ``local.conf`` before running ``stack.sh``. This example ``local.conf`` file shows all of the settings required for ceilometer:: [[local|localrc]] # Enable the Ceilometer devstack plugin enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git 5. Nova does not generate the periodic notifications for all known instances by default. To enable these auditing events, set ``instance_usage_audit`` to true in the nova configuration file and restart the service. 6. Cinder does not generate notifications by default. To enable these auditing events, set the following in the cinder configuration file and restart the service:: notification_driver=messagingv2 .. _devstack: http://www.devstack.org/ ceilometer-6.0.0/doc/source/6-storagemodel.png0000664000567000056710000014720112701406223022413 0ustar jenkinsjenkins00000000000000 [binary PNG image data omitted]
jLfoŞÖCE ŚŐ‰ô#ĘĚĚěô1.Ľě§ ěA1[` i_]č)mkW ‘‘‘!I ĺAµ' Ź˘oµQRçç‘ěˇ8Šy†% uí˛ŃŘťß/0Šő`~'ŻO{&bD÷břś^›Drr˛ăĎ–p*Ö5]Ž5==ť‚ô„в6‰üüü“Ľôčé,!a’$zB h®cD“oŕ]i|´ŻĆGűjÉâEmnűÎëKŰ÷E>ţÍ®Î>B pÖf ŃřM{ă’’gÓ{o/UuuőißŻŻŻ×Ę7^=ŁĎxĺŻĎöę0#x€}âŃ3YiÝPNj3(//ď'ęîślmţhÍi·Ůöĺf;|Pîîîťţś/7Ě]nC(v˝ĺD== Š1L+—˝¬™ 7´¸ÍĘeŻ(nř(ĄůQµµ'W™(-.Ň’ĹŇçź®WnÎ ëŇWëއS`}ĘžťŰô‹ëŻtě3>ÚW^fo%&źh×ţĺÖ2M¦čA±ZńŃWZôČúj˧ި°jÇÁ<î44 %X´÷ęçć¦Ó¦)zęTYâ‡Éč ~ý¤Š‚ĺ%'ëȧ•±m[—}Ţ­ź&Izó˛ËŰő:z‡–®×ô%n˝ĺDkjŞ5ăšź(i×vĄ|·ŻŮű™iGőŐgźčŠŮ×Éf;FŘlşcîŐzďíĄzđŃEÚ˛ç~üĎZ˙źwtÇŤ3UQaď2~Â$íI+uě·'­T‰É'Ú˝ż§§ˇ!(Ő_žüĺś8®{űýćriťŚ&/îę^JĐS˘÷ń‹ŽÖś×˙Ą‹˙đ¨˘/˝Dޡˇň0ĺn4Ę',L/ż\—?ó'Íxţ/2řůQ°żî§ŽrP@ ˇŠ űĂ˝_€ĺ¬žhMMŤ®ţÉ\őë×O+ßxĄŮű«Ţ|M’4ëşůNŻŻ\ö˛R8 [~yż®ýSůúůëŠŮ?ŐĎďy@‡&kusN´wOOIRqQˇ óó´ä­µš{Ű/5ďçwş´NM—fmşd+%ĐaDd¤fľřň8PÖěíüűßőÁÍ·čí+ŻŇ;3ŻÖÇ÷˙JG·lQ}]ťÂÎ?_3ţßźĺćqćŃŢĽěň>ńŤyP\7ő'5NŽčxÖO6,"J&_˘Ź>X­’˘BÇë6[…Ö¬|S&_ް(§}6´V’4ăšź8˝~éŚk$I_lú¨ŐĎlďţýúő“$UWUéć;î?Ły,Đ·C‰ŻżţšÂôP˙áQ}}ub÷n­[¸PÉッŇĚLŐVU©ĆfSîÚúäSújŃ"Ő×ÖĘŻáso¤píd‰F¨?čCú`µŠ‹ tíÜ[šmäÇTIRdŚó*!1c%IY™é­~^göŹ6ś; íVVR ˛âÇĎfł™˘ô@^(K|Ľl……úâ‰˙Uu+ţÝĽEÁńń 4XĄ-¬xÓÄ ź;W!ŁGËčď§ŞŇRĺ~÷˝~x÷]e'%5ŰľŁó „ťľâŻżNÁ#FČŕăŁj«Uů))J]»Vé_5Ľ<˝˝5Ăz§Ąiím?×ŕ+®ĐČ›n’Ox¸¬ŮŮú~Ĺ úČľĆΚĄ7Ţ źđpUčІ Ú˙ćżĎ¨Ť#v“λóÎfíÝúäS:şeK§jfđńŃĽőëT|ô¨ÖÝ~‡.¸ď> Ľü2yLzçękÚ¬á€ńăĂőęßPCkNŽ~üä}żbĄj«ŞÎ¸ćŐŃöw¤ í©GŰw¦őö_>ccučĐ!•6é‘p6MżćZ=űLJ´úÍé–_ţJýúőÓŞ7^‘Źźż.ź9§ŮöÖ2IŇÔ‘-/?7»ŐĎëĚţţAÜYh—Ľět}¸ňŞ®˛I’n»í6Ť?žÂô@Ń—^"IJ]»VU%%mnżëĹ—Z|}ČĚ™šôđoŐŻI/*S` ˘.ž˘¨)“őÍß˙ˇ”÷ßďôyŽś?_çÝ}—ÓkF…Oś¨đ‰uŕ­·´çŐלޯ­¬´˙ÁhŇŔË/×”G9ůďYL´&ý÷ďTž—§ŔÁuŢ=w;Ţó0@ănż]•%ĄJ]ł¦ŰÚŘ™ăŐ4´ÉÝhÔČyóÝOŰýyĂo¸AÜwŻÔĐóM’|#"4îżPÔ”)úäW˙ĺü@߉šwDgÚßŃ6tő=u&őÍ”ĺ÷öň2ëŠY?Ń+ßÔןo”ŻŻź’ěŐ 7˙B¦&t4űřŞ´¸H‰É'äeöîđçťéţ®–źsÜńçřřxîđ^FL™2…ÂôPýGŽ”$eîŘŃécřF„ëÂß<(I:đÖ[:ôáG*ĎÍ•—ŢӦiěÂşŕľű”µk—JŇÓ;|üŔ!C4ţ—wHőő:đÎ ýřńG˛fçČl±(ćňË5îö_hÔĎ~¦ŚŻ•űý÷Žýęjjě˙A0{iěíżĐöçëČĆOeô÷×ÄT䤋4ú¶[奯ž~Zi[ż’)ŕä{ŻşŇHt¦Ťß˝ýŽľ{űť{‚t¶fŽ6yy)îÚkµíążččćÍNč-ń8Pçßs·ęjkµó˙§Łź}¦şŞJĹ Ó¤ß>$˰a}ë­Jú׿ΨćÝyĎt´ ­Ő˙Śď©Öô-n˝ń¤çÜhšńéş÷´ţ˝’¤„†kHŇŕˇö‡ňśÇ;őYgşż«U6Ya„č:^űÄľ%iiť>Fܵ?‘»Á ¤×—jĎ«Ż9ćź(ËĘŇ·ŢŇľĺËĺćᮡłgwîř sÔĎÝ]‡>üP{^yE%i骭¬Téńă:đÖ[Jů`ŤÔŻź†\su‹ű}}uěłĎupÝ:ŐŘ*eÍÎŃ·K–H’BFŹVęktdÓfŐV:żçÓmměôńęë%I¦€Ąő•mŘ ›M5­˙9ěÚősw×·ßVęš5Ş*)QŤ­R9űöéË'źRŤÍ¦ŃŁ»¬ćÝŃţ޶ˇ[î©NÖHôxçMś¬¨ÁÚ™ř…ľţěS :LŁÇOhqŰéW'H:99eŁďöíÖĚ ăőěrzÝŁaFüÚÚÚNíFś›<z`UWŘ:}ڰóěĂqúi‹ďŮ´Y’:nl§Žßżá!óŕ† -ăFűv ˝=ZrxŁóą•eeťÜóćß3x›»­Ť]qĽŁ§śwkBÇŽ“$űüófď:¤wf^­Ox KkŢŐíďhşűžęHý@ű»­ 'Ř«Ş´ő¨ź}ĂĎ´äů§%I<ňÔi·›{ë/őáű+őęßţ¬ÁCă5é’é:”ú˝ţřŕťĘ9qĽY5p°ŽJUŇÎm>z\‡÷#ÎMŐÖ2ýýeđńVeqI§Žá=`€$éúŐ«ZÝÎ'<ĽSÇ÷ •$kąGqCďďÓŁ<'Çéçşęę“ďĺžć˝&Ý𻺍]qĽâŚŚö×0ĚţyeÇŹ»¬ć]Ýţ޶ˇ»Űבú€ľĄÍQQöe4 r˛zԉϹágrss“»»»f]wÓi·3šLzu凚żđ.-~ę]<2\wÍź-KpżöŽf]7ßiű‡źř ‹ŚÖÝ?›Łź\:ľĂűźm™GI˛OF Âtk¶ýaľ~úőďźŇŻ˙T›ź5é’éú0ń»NíßŇyşZcO–Ž$Ś@×Ęýţ;Ĺ Uô%Sub÷îNŁş˘B­ś=GUee]~ŽŐ6|ĽĺáĺĄj«µůĂ­ŮËqÝĄ«ŰŘÝ5kéŰÓŰ[F??ŮŠŠÎzÍ;Óţ޶ˇ§ßSŕÜŐf §@˘¨€ŠőpŤ=Y{¶€0]Łqţ„!3gĘ7˘íá!ŁFiöż^Óŕ+ŻtĽVš™)Iň‹îžżźÖě’¤€[|ß?Ćţşőĉn«SW·±»kvŞ˛†ÚřEG÷šw¦ýmCOż§@$š.y<íG*Öĺfť§K A.ţűµ˙€NěŢ-“I—=ő´Ľ,A§Ý6hčPM}âq"˰aŽ×O|kďY1bîĽ÷ ź8Q׾ů†ĆÝ~{§Î1{ď>IRě¬kZ|?¶a%„ě}ű»­N]ŃĆ~îî.«Ůéj8䪫š˝8d~öÉÇšůâ˙ą¬ćťiGŰĐZý{Â=úp !I’¤JşdöhM—ülÚł„č‰ţł*ňó0xćĽţşFŢ4_~ŃŃr7eôóSđđášđ«űuŐ?ţ.sp°ňSSµçŐWű§®]«›M1—]Ş‹˙đ¨ü"#ĺćá!/Kâ®MĐ%Ź?&ż¨(yz{węüR׬Q]M­bŻąFăy‡|#Âĺn4Ę/2Rănż]±×\ŁşšZĄ®]Űm5:“6ÖŘěWb.»Tn2v{ÍNupý:Ő×Ő)öš«5ęć›eô÷—‡É¨Đ±c5ő±?ĘÝhTîď\VóδżŁmhµţ=ŕžç.Źöld±XTXXH‰®éő‰îdWÝCß}ŁěL®sK†Ž(߀ö=„ç&kvŽ>ľ˙Wşä‰Çe6LçÝu—λë®·M˙ę+}őô"§‰ ËNśPâłÖĹxTfĚĐ 3ší—źšŞ˝Ż˙«SçW|ěľ]ň’&üęWuóÍuóÍÎÔ×ëŰ—^TńŃŁÝVŁ3ic~rŠBÇŤŐÔ?ţQúŁýµ7/»Ľ[kvŞ˘ĂG´ç•WuŢÝwiü/ďĐř_ŢѬĆŢzËe5ďL=;Ú†¶ę¶ď)ĐljńăÇëСC*ČÉ’­Ü*“Ů›Ęő@]±Âơ£§‘źť®+~z'aDW–•Ąďş[1—]ŞË.“%>^¦€@ősë§ňÜ<ĺěß§”÷?P~JJ‹űűüs;¦óćjŔřńň R]MŤŠÓŇtlËgúá˝÷ś–Úě¨ä˙Ľ§˘ĂG4|îŤę?b„<˝}TUZ˘ÜďľÓ÷«V+gßľnŻQg۸}ńóšôŰß*(.Nu5µ*iXR˛»kvŞďV¬PááĂ~ă ˛ &OłYÖěűâ xë­f“;vwÍ;Óţ޶ˇµú÷„{ ś›úŐ×××·µQZZš-Z$Işę†?šĘő0¶r«–.~L’4kÖ,%$$th˙'ź|R™ “§ˇeˇC4ű¦_÷¸0âąçžÓˇC‡d.)QÜÁC\(čcR‡ĆŞÜĎO±±±zřá‡) ×hW‰ččh™L&Ůl6e=D ŃO;ěřs\\\‡÷ě±Ç(bümˇg´ź[{7l|Č=zđ;ŞÖ5×0™LN+ŁŔ5# 
cÚHŚ7N’TV\¨’Â|*×Ă4Eťé3C×î@bذaŽ?'ďŰIĺzܬ •J:Á5# sÚHkĚ1’¤”}»¨\˛ď›­’ěĂ5$\‡0:Ď­#7¶‘qä Őë!ޤp\oo–duÂ83 $¦L™"“É$‰^=ĹI;T]i(ž4iqÂ8snݡ±—Dęţ]˛•[©ŕYÖ ˛ş† F@×čp 1}útÇź÷íÜJϢŚ#••vX’4yňd ŇͬĄ…„ĐE:HDGG+66V’}2EzIś=»¶~*É>™eÓ ÝŁ¬¤€0ş[gvš3gŽ$©şŇF/‰ł¤iďéÓ§3™Ą FŔ™ëT O/‰łŚŢ®c6›&Ś€®áÖŮ›ö’Řůĺ§TŇ…čáZ“'OVll,at!ŹÎîŻ1cĆhßľ}:°ë+ĹŹť ţa‘T´›ŮĘ­úlýJIôŽp•ńăÇküřńşŰ™ě‘epp0EĐkY­VeddH’RSS%Iéééš={¶˘ŁŁ[Ü';+KëׯoóŘÜÜ”T\Üâ{eŮŮí>Ç]Ż˝&ٱĹ÷N;&I˛Ůl:xđ`ł÷“[Żż^Ő11ňôňj¶MZZšV­Z%IŠŠŠ’Ůl–Ĺb‘Ĺb‘——×ië@ ŃDtt´fÍšĄ 6¨ 'K›×ĽŁé×ŢÄß°.r8yżěúJ’ŃjŇ=ѦM›”””¤ňňreffžv»‹Ş’“Á­¸X•%%ö ˘Âi[ł»»ĽÝÝť^ 4ä_Y©âSzN45-$¤]çlÍÉ9í{ćŠ Ťňó“$eŰlÍŢĎ­Ş’$enÝŞÄ]»śŢóoč=‘S]í3Z 5${ůI“&iţüů§“ ””:tH©űw)_żJÁˇ,zlĺVmY·BŐ•ö´íž{îaU g=|HMMUzzşŇÓÓťľŮ˙ó#ʍ,;[¶˘"•eg«,;[µ =Ě%%ęo0Čŕî®@OOÜÝŕé)››‚N@śë‚ŚF]ćôZYuµĘjjT]W§ÂŞ*UŐŐÉ»¸X?nÚäŘĆÝ`Oh¨L2ůűkőęŐNÇPTT”ă=5¤č±é8Đ IDATŇ@ÂŰŰ[÷Ţ{Ż-Z$IZóď%şö–{%:iÍż—8捸ńĆ™ŔY±nÝ:Ą§§+%%E¶†'4úzŮ2ůxz¶ř^Ľźźâ†8ŕô|<=5<]ŚÚŞ*§§«8=]eŐŐÍŢĎĚĚTff¦¶oßîx-""BŹ=öXŹj«[W0::Z·Ýv›$©şŇ¦5˙^˘’Â|îŞÚĽćGqŃEiĆŚŔYńíÎťÚ»woł0˘żÁ 8]¤i!!§ #Đ}|<=őł%„‡kjp°Fůů)ÂËKćSćÖčWUĄ˘†‰8{ Źî8č”)S$IoĽń†cĺŤkoąG&3Ă Úcóšw”şß>éIll,K|čViiiNó?”ž8ˇâ´4;¦˘cÇäkµŞÂÓSBŚF }vEOŐŘł˘iŻŠ˛ęjVU©°ŞJ޵µÚűÖ[’ě“jú„†* &FÁ Ă9V®\©¨¨(Ť;ÖeSxt×§L™˘üü|ÇĘŤĂ7%ÚFDDDčŢ{ďĄ(şÜž={”””¤¤¤$Ůl6-úÍo”—šŞ˘cÇ«Z4şŔbˇ`˝PK!…$ÇpŹĚ]»än0Ȧ-_~éxěر7nśâââÜmççŃťŤOHHP~~ľ¶oßî%¦Í™Ďś-°•[őőƵNaÄC=Ä$–şL^^ž6oެÄÄÄfĂ/>~ăŤNŻŢ«¶ŞJ©©2»»«Ľ¶V’´wď^íÝ»W’} I“&uËś†ÝݸĆáMC &ştf+·:M`I +%''kýúőN+b4ŠđňR¤—aDd4ę'‘‘*¨¬Ôá˛2eTT8‰íŰ·kűöí ŇěŮłS4tW4náÂ…˛X,Ú°aŞ+mz÷_/č˛Ůs5|Ü…}ţÂçfečăw—©¬¸P’}Î{ď˝—0@—X±b…>űě3§×ú ěăŁHłYĆS&?Dßd4*ČhÔ’ *+•RZŞŚňrU×׫  Ŕ±„kWńpUĂd±XôĆoH’>_żJůŮYşřŞźôŮ‹}8yż¶¬[ˇęJ{W©‹.ş ,t©±úL’gż~Š4›5ÚßźŐ0Ц ŁQ“ŚFUÖÖ*ŁĽ\‡ËĘT˝c‡’ËĘ3uŞĽÎř3<\Ů )S¦Čb±hÉ’%˛ŮlÚżs«2ŹęsóJś:_„$ÝxăŤ,í  Ë䦤čÇŤUYR˘©ÁÁ 1™č 3ş»kŻŻ†řúJuuĘŢż_Ůű÷+ćâ‹1a‚<˝Ľ:}l7W7&>>^úÓź+IŽy%öîř˘O\ĚŚ#µú_/8“ɤG}”0@—¨(*RŇ›oęű˙üDZZF”·7aşÔ±ŻľŇŽ_TnJŠ$ű„©ĺq6NÜŰŰ[?ü°Ö®]ëW"qăZI9 isćË/đÜ[RĆVnŐ®­µçVÇkĚ +äĺĺ)88Xą))JY·®ËÇú-©­Ş˛_č?;vt¸çżÇŮ<ů„„ĹĹĹiŐŞUĘĚĚTVÚa˝őâź4zÂT]0ő ™ĚçĆúŢ_hç—ź:ćŠ0™Lš3g˝"ś±´´4=˙üóŁ‘§,ĺ ¸ÂG»ě#VŻ^-Ií~Öő8Ű'ŻÇ{ĚŃ[B’öďÜŞä};5öÂK4ú‚‹{m0‘Ľ÷íÝńĄc9OI3fŚ,X@Ż]bٲe˛ŮlÚ›’"KHśÁ~ 3f„†ęËÜ\UWkőęŐŠŚŚT|||›űyô”$$$hňäÉZąrĄöíۧęJ›v}ů©öîř˛×É{żŃÎ/?u,ĺ)Iš7ožĆŹĎÝ  K¬]»V™™™’¤ó #pVřxzę’ţýőQV–Şëëµ|ůr=óĚ3mîçŃ“¬űî»OÉÉÉZ·nť:äL ŠĄac.Pä ˇ=î”ć+e˙.%ďÝŮ,3gަL™Â] ôp•Ş  ôB^ĺĺň¨«Łúśm۶I’ú Š÷óŁ 8k|<=u^` v¨  @{öěió yŹžŘřřxĹÇÇ7 &R÷ďRęţ]ňńÔ‰S5(nÔYťÓVnŐŃß)yďNeĄvzŹ čůĘ˝ĽT "?ŮFôjž••yŕ;  OIKKSAAýŠ0=Ŕ__í.,Tu}˝RRRzg Ѩ1ČËËÓşuë”””$›Í¦˛âB%n\«ÄŤkĺă¨AqŁ3DáŃ»}XGnV†ŽüN‡“÷;Í ŃhĚ1š>}z»ĆËp˝77ĺ…†*ߤjŁ‘‚çţ>č‹ĘËËötsŁ č<=•[UĄôôô6·őč ÖÂ… eµZ•””¤¤¤$íŰ·O’TV\¨ý;·:–Ó Sph„‚„+84BŁIýĂ";ü™%…ů*-.TiqňNW^vfł^Ť"""§3f(88;čˇrú÷׉đ0Őy8˙Óg U`ÔpI’_č@ ô"y‡“”űc… ‡(Ż­m÷¶˝©aŢŢŢš2eЦL™Ňb8!I9Y*ČÉRę~ç}=Ť&‡†·ůĄĹ…Ns@śNDD„&Ož¬qăĆB=ýE//ĄĹD; Ëđ ‰Q˙Řńň0H&ß@ŠôR%ŮG)€>«ésHŽÍĆ„–8ëĘŞ«e=W‰¦š†’}üTjjŞRRR”žž®ÂBçPˇşŇvÚm1™LŠŚŚÔ°a稨(–íz‰ü  eFE:zEĚţŠwąB‡žGq@Ż–——çřóa«Uců’g×î&ĎásćĚis{ŹsĄáŃŃŃŠŽŽÖŚ3Ż%''K’RSSU^^Ţ®1,ÁÁÁ˛X,Ž˙>˝W~PŇ tü1ć2EŹźFaŔ9ç’©ßL]íÇŇReTTH’Ć…h׼ŠçrA Ŕ“@ßÓ4Śp÷4jČ”źĘ3‚€sŇ‘/ľÔ€k®VUI Ĺ€Ë%kwQ‘$ÉT^®‘ýű·k?¦bpÎ)ň÷w #F\µ0śÓŞË­úţ˝÷e`ůO¸Pem­ľĚÉq #bSReňôl×ţέ Ą Ś‘t2Śđ±„SpΫ*)QҲĺň`rK¸Č—99ŽaŤa„G]]»÷'pNI‹‰qL`3ájÂĐçě{ëm•dťP­Á 5Úßđí5Đ•Ü M “$ćĺw8ŚÎń9$ô-Eţţ˛úůJ˛O`ÉJ ŻĘضMYGŹČ®ýĹĹ:\V¦Á>>Šóő•ŃÝťáŚtxó;¦áŚUUť:€sFfT¤$űPŤ°áQЧy—–Ęł˛RŐF٬µµÚ_\¬ä’Ezyiźź‚ŚFŠ„VUÖÖ*ÇfSTĂĘ“MF§†ůAAÚť•ĄęŻżÖ”)SZ=>€sB~PŞţŁ9îryšĚôi~eVŤ<đťň‚”"›Ů¬ęúz)/בňrxxh°ŹŹůřĐkN—•)ŁĽ\ňě×OaĘŢľĂ)8íďĺ– ĄźČRib"€ľˇ8 @’d0ű+|Äd ÎyÁÁÁš5k–v˝úŞŚUŐ§ÝÎRP KAJ|ĽUh Va°E’TTSŁÝEEäăC1ű¸ĘÚZeVT(ŁĽ\Ů6›ŞëëďU××kŰĆM˛tůçHčý˙€ * ´Ńńô ÁÁÁJHHPć“Oµk{ż2«üʬ•Ąâ€X‚d¨¬RIU•"Î;OőµµŞ./§°}HşŐŞýEE*Ş©iöž[M­‚ňó”_ sĂJ]Ť@@ŻWÖ$ŐŠNAZa¬ŞRHNŽBrrTăć¦Üş:ĺî? 
IňʉQčŃňéß_Uĺĺz˙đayöë§P//…Ť 1™Ţq±yz:…n5µň/*’Q‘Š‹»ýó $ôzeľö•5Ü=Ť LAÚű@xĘ2ŤĹÇŽ9ć ¨4d=J’TTZŞ”ŇRI’·»» …Ť 4ęĺE!{‚ĘJUW«°ŞJĽ˝üü¤úz•df*㛝*łŮd2X>ĄĄÝÚâ´÷— @oWiđ”$™P €.â^SŁĐăÇUęăŁr??ÇëÖÚZY+*”Ńđđŕé©kÂĂ)ŘŮř=¸¶VEUU*¬Ş’µ¶V…UUĘ©¬tÚĆ×ËKY'”óÝwŞ*)qzĎ()ţ‡äłvţz˝Ć˙@š|)@W=,ÖŐ),ë„Â~.ńń–Ő×W^fU˝+śůÔÖŞ¦şZć  ąą{ČVTčtśo dps“gż~ 4äíá!OO |†öęŔ)CKŽ»[599=óă28WĽ(@7iśłQŤ››ĘÍöá©;w9˙^ćç§‘#ĺáęq*owwy{xČŰÝ]>žžĐ7—Ënčib­­•µa>‡l›M’t^` c¸…›Á Ů[őŞWmeĄĽóňśŽăVS+SąUćŠ y•WČ«˘ÂĺC0:Š@Đń‡Éş:§€˘©Ş’elۦ77™†ĹÉf67ŰĆZ[+km­$ÉÓÍMçGG7RĐh_aˇr*+h0Čŕććx=¤áa˝Q€ÁpV&Ýl:ŃTksklĚĘRî)Ű·¤07OîîĘض­ůg °‡ĺÍćé÷Ť ÷INNÖ /Ľ ťž§¤ś68«śuuŽ9 * UŢ m=Ľ†´«ŞęÓ»Y t~x¸&N™Ňć¶€-2#łĹ×Ë˝ĽTă~r‡ąĽ˘Ĺá ’˝‡†_d¤Óö-)>@źŐW˙ ¬swçâzŚŚČťHMŐá+4ţüV·%Î6ooý8,ŽBčłř7€łŻÜËKĺÖ2ů¤§·ą-C6€^Îl6S€‚)Đ˙ŢĎš5KˇÇŹËXUMAĐëĐCčĺ,X ôv¤Źç˛^x€|đÁ>Ýţřřxn Ź VBB‚2ź|Šb W"z9ooo~ Čču˛\Ž@¸ŔĄŠň˛µpĘÝ5męjkĎé¶–ćiá”úőśŃgĺó sOhᔺ˙ęáÜx Ça hÇô§«^Ńß~Ąü*/-–·_€b†ŤŐ´ëhÜ”+šíSVR¨Ô¤í:ď’«)ŕ)ަě“$EĹŽ›»{—·'ÔüÔsČ<ś,IŠ:꬜ϱ†Z6†Ŕ9---Íţďmt4ĹzzH@+2~üAĽíríܲVs<¨˙·úýăŁdÝůŘ‹:‘vHűÝ­Ú¸ęU§}ęjkőě}?UĘžm°Ĺ‡ä˝’¤a]×k 'ÔĽĄs~ţĹZúő =´řťłrNŤáĎŔx ç®´´4-Z´H‹-ŇĘ•+eµZűLŰ­V«RRRTęăŁ7í@ ç”·ţú¨ĘŠ ô‹ßż ó¦Î”Éě-/oŤśx©î|üEöÓńcťöŮňţ2eNÖ áă(`kÉĂĆvŮ1{BÍ{âu?Úţ ŚËŤŕśU^^~ňßâ-[ôČ#Ź(11±O´===]‹/ÖŹĂâTnöâf@Ź™‘©™Cb5oŢĽ6·eČ´âÇ»%IGś×ě˝ŘQhń{??±đ KÝďřůĺ˙˝W«^zJ‹?ŘŁĽ¬tmř÷?t`Çg*Ě=!Ł—YCF^ 9 ĐĐŃűÔ××ëž+†ČËŰWϮئ·˙öG}űůYDę—mŇľm›µqŐ«:üĂUUÚŁ‹Żž§+ćÝ)O§óŰ·m“Ö,]¬ôßËËÇW]qťnĽ÷úÓÝstä‡$=»r›B#Ů™++ч˙ţ?íú|˝ňł3e4™5tĚÍľí y^›uęČyK±×((4B˙|âíßľEuµµš<óÍ»˙qŚ'ˇúrÝŰúęĂĘ:vHU¶ ‡EiĘŐsuőÍ÷©_ż~§­ůóďďî˛:?šŞµKë‡Ý_«˘¬T–‘şřšyšyÓ=r÷đhőş?xíXĺeëĎ«v($"F’Ú}/HŇ˝W•úőÓ3oĄ7ź˙˝ľßőĄ &/MżţšóóÚĽ.ÇZÚÓöŚĂÉúă­—)4j°ž]áüK˝µ¤Hż˝ţőssÓ_ŢÝ)łŻ»ďź¶îoč 6›MË—/Wbb˘,X ŕŕ`Џą˘B~~íBE h…—ŹŻ¤“ß4·ć‰Ąµř$I’Ź–~}B‹?ŘŁĂßďŃc?ź¦|«»źř§^ü$E˙ýŹ˙¨ 'Sľ˙:Ü÷ŤăůŮެ(WHÄ ˝úÔŻ4jâĄzaM’~˙Ňműä?úëĂ·¨ŇV®Ç^űX_˙ť†ŽąP«^zJď˝üŚÓąěülťţú𭲄FčOďlŐ“Ë·¨(ď„ŢüË˙čhň^ůXaDNć1=ľ`ş¶®[sď{L_@-~[YÇé™{ŻUňžÖżeęČyäŞ07KŁv}ľ^×Ü|żž{w§&NżV[Ţ[¦uËţęŘöÝţIKźýŤ˘‡ŽÔ3ď|ĄçţłSˇQµzÉÓúŕµçZ­yWŐńĐ]úßŰŻRMuµ~˙âZĽfŹŹŻw˙ąHďżöçVϡ(/[EyŮňö t„»2Ua-U`˙úŕőżčg<Ą'–n’››»Ţ{ĺYĄîÝŃęu)Ě=ˇ˘Ľlůř)8,ŞC×*4rÜÜÝ•wěbĚ’¤jĺĘ•čÁ$ WÜx‡$éo˙ýs˝÷ĘłĘh¤đtŽ$ŰL'¬Ş¬ĐKřĄęëëő›çßÖQçËh2+zč(Ýô_O޶¦ZĽţÇţ‡~$Ą˙ř˝¦\=W¦%ČÓh’Éě­Ý_~$łŹżn}čY…F’—·Ź®]ř$)ń“wÇ¨Ş¬Đż?"ßŔ`Ýţčß< JţAýőóß=§›ŢW}}˝bâěó7ÔÖÔčĄ?ܡĽ¬tÝ·č5ť7u¦ĽĽ}5hřxÍż˙ űů5<üźN{ĎK:9„ÁhŇőwý^ŃCGĘěă§Ů·ýZ’ôÍ擦[Ţ[*IşîÎßËÇ?H~Áš˙«'döőw N­yWŐ±¦¦ZŻ>yż|üt×ă/j@ôůřꆻ•ÉěŁCľmőŽž2ˇdGď…ôCß;‚…îzDA!á ‰Ń¸)W6„»[˝.-MhŮ޶{ŚęŁÚÚĺťHwş·6­~M^>~şrî/;|˙´v] ł‚5kÖ,EŹľRń—ęüY)|ŘĹr÷4)öÂyެ®§H@Ĺ hŬ[˙K’´ţŤżiÝňżjÝňż*,f¨.ľfžfÜx»ÓđI:šě~ Ť¬´ÓđşsËZKÝŻřó¦(něENÇ2ę|űĂöI­Ö¨˝çeoź}hä+Ż—Ź_ ăőŔ0Iö^ŤŚ^ŢŞ°–ęÇ»4ú˘i’¤zńă”VkŢUuÜţé{ĘÉ<Ş9?@žFÓÉsí?@K6jűŻŤéÔ˝ĐŢLąz®ĽýNnÜŻź$ÉÍ­őJNNh9¶S×*,&VŮ釕ť~ŘŃ›ćËu勤(_ ’Ů×_Ű?}ŻC÷Ok×Î$HHHPéÖ““YĆN¸N1c®R^…—>řĆŞ11 Ź4P, ‡ˇ‡´˘_ż~š}ŰŻµř$ý⑿j̤éĘÉ8˘ŐKžÖŁ7_˘ÂÜ­>îţňcIŇI3šŰĂ`˙Ũ¦şŞÉ›}˙‹Żi{ şş:Ç·×ýĂcŻďßń™$)nÜEÍ?ÓÓÓé!ą±GBKK—zűÚ‚ëęë:TłÓť—ý!Űţ@:bÂ%NŻ×TŮkŕi0:^›yÓÝ’¤żýî6˝ňäýJIjyŤ–‰3Żăž­ök7tě…mٵsčě˝p¤áńă&9m›—e_Ún@ô6‰˝Ž‡˙Î\«°ˇ’¤ěŚ#’ě˝i>Y±D^>~şjŢťťş:r]ŕLyí˝Żjęúi÷‘jmÜ[ˇ‚˛Z ô ô€v0űřię¬ůš:kľ sOčőgÔźiéłżŃož»ŮiăJ ÇRíŕQ±#›ł1ĚěÖě}řůS›mźôőF}ţÁrKÝŻŇÂ|ŐŐש_Ă·ĺQCF4;FäŕřfÇ(ĘĎqzHn|č]ńŹ'´âO´Řöŕ‘­Ö¦˝çe můÜ sł$IA!áŽ×®š·,"µvébműä]műä]Ĺź7Y÷<ůŠă[ý–jŢUulýš˘‡Ž”ŃdÖ^~Fëßř›˘bO>L6~ăátŚ’Â<dgĘ7Ŕ˘ŕQNç÷ĎM‡eô2w¸9ŻŇ˘|4 Éđ p®ÇńŁ©->d_pŮl]pŮlĄîݡwţţ’w'ęő?= žű÷ikŢUu,noüBZ­AKçĐ8ˇeKµnϽиż_P§˘07K%ą  ËéĎ«±ľYŽ´ÝÄŘ{Häd“$}ôÖ‹Žą#uäţiíş€+ĄŻŃšK•“v@{÷îŐěŮł5mÚ4y{3ź ĐĘ˝ĽtĽ´Tiiim~ Ů€üóń»őë9Ł•{<­Ĺ÷Ť^ö_Z»ÚK-w‘÷ňö“äܿѾDű2‡ç_zŤ¤“ß·ôÍ÷Gď,‘$Ýń‡żkčč Žą!Ř%IŠzň[÷şZ{ywOçĺ+w}ľŢ~ü&ççáŮpţý:W§ŽśWcďIrssţĎOăę#'^ÚâçÄŤ˝P˙őěrIŇw»¶¶Z󮪣ÉŰ·]5hé{LÄtň^p 9e¸ĹŃ䆶 ëxÝHŘ{†äźH×ţí[”ńăşâ†;śćłčČýÓÚuW3ůźě‘·~ýz=ýôÓJNN¦0@ČŚĐG‡Ú·Ę ´ŔĂÓ ’‚\mßř^‹ďďŢú‘$iÔ„“Đé +4í’?â‚‹%©ŮŇ™ůŮ™úfóZE Ž×ySŻvz`Ź6¶ŮççgK’ú‡źL™Óp<ČGîx=$r $)7ó¨ăµJ[ą>~{Ił‡Ü!#Ďo8ÖwÎČŰ6ë÷7MŃÚe/´Z§Žś×±† -%ű* ŤŞ«*őÍć5ňö Đů—Î’$­zé)ý÷Ü‹='$©ľa.˙Ŕţ­ÖĽ«ę9Äţçś&u<¸ď=0FËźű]»ÎaP“@ 3÷ÂŔSÚŕh[Ü6B’ć+lt¤í’}R@p¨ 
słôń;Käĺí«+ćŽčĚýÓÚuW8v¦Ć_ýůő,I*((Đ /Ľ —^zIV«µ×´Ăl6+66Vć’yÔÖqaŃëH@ ţFľ­]ö‚>~g‰ňOd¨şŞRůŮ™Úôîëú÷óż—%4Rsď{¬Ůľ%ą˛•Ű™™{ďc2™˝µâOčhň^UUVčČIúŰďn“Éě­{źzEnîî lŤßŞ7˙yXĂÄ†ź˝ż\U•:đÍç:üýą»7ywůO~.IúĎËĎŞ´(_ÇŹ¦ęíţ ߀ ű/aM’zÇďäáiĐ[/<ŞôCß«˛˘\»·~¬WžĽ_ů'2ÝÂ|ť=ŻĆoČ'NKĐŠż?®ÜăÄ'n IDATi*.ČŐ›Ď˙Ź sł4ďţÇO.˙X_ŻśĚŁzç8?GeĹZ˝äiIjöP|jÍ»ŞŽłną_’´nŮ ˛–éXę~˝ţ̲–iüĹW¶óNťşâÇśRĂÓ·Í9h>ÉfGÚŢ(,f¨*¬Ąú~×V͸ѹwDGíó3ŽěŮĆ˝Oü–;.Žéh”‘¶HHTË>›LŇŽqžšńQaŢť"JHHGŇ’„„Ůą†Ľ?ëW̸;Ö±Tcµ­źV­J1ü¶{$i–Ă»˙Í‘=Ű ďĎ­“§) "Ň%řE0lÂ+Äüř)vź6đĺţB Š47Č•˘e?ED®!ŢţÝÉÉ8Ç˙Íy‘G^ś‹›»™Ý[Ö˛aů"ÂŢČčźNU¤QeeĄX“ńţ¬_ańôá©Yďc0¸^×fłŮHMMm˛ś»»{ŁË”%%%5«˝¨¨¨÷edd™™Ůdţţţ\ŃëIIIˇ°Ďž‡„„4¸,b[\O{Ƕ±ëiďŘ^é{Ąą±mŹ{Ąą±mÉő4§ľ–r÷ô =Ż‚u{ěô 1Ó˨_"JHHC&<ţ"Ţ~$¬_ÉoIyY9݂ø󡧹ăţ§p5ş)HҨ—ď˙!YÎ;śź‹îÁ×]×µwď^.\ج˛M 1ť;wnłęiě’„„Ö­[×dwŢy'ăÇŹŻwźŐju SoÍő|ôŃG;v¬ÉzfĚAttô»žöŽmc×Óޱ˝Ň÷JscŰ÷JscŰV÷J[(«pâű”R¬e ék¤$ď GŹ%>>^ż4D”‘jÎÎÎÜ~ßÜ~ß †\–?¬ŘyM^—Ýn'°G0çĎĄ5Y6=·‚e˙ný˛ŤŐq*Ą´Yu|źRJ~ődź-jVM]Oznó†Ło űÄ ĘŠŠˇ˛’R»ťó˛íťwřú­?qŰmuęýę«x…„}âË'M&áOâěľ}çćRQVNqn.g÷îeűÜwůhňdrNźĆ§W/~ôĘ+zÚFH\¦€°ĽűrúŔÎ$mĂËż'Aˇ ĚdňńŕBbbŁĺ˙ë_śÜ˛…ÂěěZŰ  ×Í7Sjł±î×3Č?s¦ŃzňRÓřě×3řáôu Ýú÷çÂáĂz!Ú€FH´‚«›…đ'0pĚÇü„Oľµqü\©s…䥦ĐďîńŤ¬¬¬“ŚĽc,‡ţőŻ&“Őr­VŽ|ň QŹ—ÖSBBDDDDD¤ řEŕéBY…;“Křr!E L;°|97Ϝɭo˝EXÜ\ŚĆftqňŃ“[ľjQ»Ç7m®uĽ´žŮ‘kŢöíŰŮúďrl• şő)DDÚEz^źî*d`+Q=]9ÖŠŮlÖ¤—­tä“O0űű1ôż oüúĆŹˇ˘¬Ś ‰‰śŰ·ź´Ý»Iݵ‹ňâúWVň  ëřńµ[]ŢłG˝mD ąćeffrúä1BD®ŠďSJ9r*‡=ź- 7'‹»îş‹qăĆ)0­°çýż“´î3®źđS®»ĺüúô!pŔ `ĐS(+,äȧ«ŮµhĹůůµŽu5[(-,lQ›ĄöŞeU]-z‘ĚąŁG9±|9“'On´¬""""""WŘąÔăäćd°víZxřá‡‰ŽŽVp.SÁąs|ó×|ó׸y{4h= "ä¦éÖŻ'O˘×Í7óĎGĄ03łVbÁÍË 7OOŠrršÝžŃŁ*Qj+PđawwÇn+ŔĂjm˛¬ćąÂÂb<öY,>Adeeńî»ď˛zőęË®344”3fĐ') ł˝°KÇ·87—S_ÍÎyóřřÁ‡Xqßýd&'ăĚđ§Y«lŢʼn,»µ0äYu|Zšnč6˘„„H;đôˇ˙-?ÇÍâëضnݺˮĎb±ŤW C…&Ϭ)39™/_ţ-׍Ykßą}űżőÇ-ŞłĎŹFpć»˝ pQBBDDDDD¤d¤ŕ»ĎćPl«ZŠŇd21}út¦ÂâFpÓOŕîďßdŮę‘ 7·ZŰ“.&"︀¨¨fµëJôřŞeF×¬Ń ŃF”ifO_ĘK‹6lłgĎf°–l‘!?˙9Cý9Ă~ŮôŠI·Ţ Ô]Mă‘#śÚú5ÎcçÎÁ?<ĽŃz<{öäŽwßĹ`2qôłĎZĽ:‡4L“ZŠ\a˝»»0dXýýÂßß_“Y^¦o,dÜ_ç=nŢ!!\±’sßŔž™••||đ Ąo|<×ßsű?ř N=_ÍšĹOűôĆ;4”źýß˙räÓŐ߸‘ĚcÉç`ôđŔ·÷uô5Š?ű“‰Ś¤$ţýÇ·ő"´!%$DDDDDD®ł†GąŃçŞë§ ´BÚîÝlţŻrËË/4x0AŤŚ0©(+cÇ{&yý†:ű ł˛řףŹ1ćµW >śëv×˙ěžë:öĺ—|=ű÷”ÔżÂĆ»w5zŢ †Ţ¨ŻJH´‘ÂüL F®n„tť›‚ŇĆŽ~ţ9ÖożĄ˙Oî&ä?Ŕ·W/ÜĽĽŔɉ’‚rNźćĚž=Y˝šĽÔ†WÄ(ĚĘbíÓĎrÓMDÜ~=bbđ Âĺâś™ÉÉśůn/IkÖp!1Qż”‘kŢ#čŮ—ďN”*"rĹś>°Ô#[ą.úFž|ě~Ţ NÓ#""""""-tîř·ßý‰cO“Éĸq㏏Wp:9ëÎť\ą’÷ŢKȰpď°8ö鍶Ą„„H3ćgrtÇrrÓŹ;¶ĹÄÄ0iŇ$ kÄż˙đG˛Žăú{ďĹ',Śň’RrSN+0mL éR‰ô“»›,g4űţ÷ź=ö %öś&ëés[ű˛Ď&“wáD“uxuëÓčܧlh·ëéŢ{(îžţ­Šm[\O[Ŷ-®§=bŰž÷JS±mĎ{Ą©Ř^z=.®&BúÝrĺ;PF¶śŞ•|}}™4iYzR:ŻC«ţɡU˙T ”iŁÁ SeVł:eţA}ąaXäžŢEć٦;Coľ˝Á}é‰Ç9}ŕ‹&ëz+ݢ"Ü˙u;^OŘu}čćU˙7ŔéyÍ‹m[\O[Ŷ-®§]bŰŽ÷JS±mĎ{Ą©ŘÖw=ŢÝűâérĺ:OΕü0ÖŹĆ‘‘‘Á¸qă4W„""""Ťóópah_˙nFY_‹3·27¸˙Îd6ŁžĆę(:íJsVµďčÚh=«Úńz†ö5]=‰nÍ‹m[\O[Ŷ-®§=bŰž÷JS±mĎ{Ą©ŘÖw=e%Wne‹Č 1˝Ś¸ą:¦y"DÚ„SeeeĄÂ "ťŮ´iÓŽEŘŕŃ H’˛w3iľ`ѢE H”Č»ďľ ŔŔ1O´ŮňľĄĹ6\Ý,x›ťÚ×HC‡ľö>IIxŘtCČU—éçGŔđá ĽőÇÄĹĹ5ZV#$DDDDDD.ĘĎLĺÄžOqu3ńŔÔéô 1*("-éď‡őÜYň”iJi±Ť3IŰjÍoQtá„tÜ +¸óÎ;Ůýß˙Ť[IéU?ź'v荒­˛Ľśâü|ňĎžĺÜţ$oŘŔůďżď°1­ľ-ďŮ>śéʲĎ&óÝgsk%#îĽóN"##;ôy0~üx‚Ξí¤¤CžŁ“‹ &şőëÇŔÉ“°řďŚ_¸ŹmÖĆŔIëM†Hǧ"""Ňi•Řr‘.Îl6NznŁ{‹Ž--¶qtÇGd¦tl gҤI„……)¸—©ćč'Ü<=ń§÷¨[7ŽŕˇC™°x1˙śúçĎ·ş˝nýú)čť”"""Ňiĺg¬`tQaaaĚś9“e˙nŮ„Ž©G¶rúŔĘK‹0™LŚ7Žx­ Ń¦*ËË)ĘÉ!m÷nŇvďfß?ţÁŘ9sŠâÇłgńŻGkuݢ•謔‘N«¬¤Ş#áďďŻ`Hł•ŰH;˛Ő‘Ś6l'NÄb±(8WXÁąó¬}ćWÜżęcz Dؤ$$Ô*cňńađÔ‡é‡gPÎöŚLÎ|·‡ď/!űäIO}ażüĄă¸ęÇ6ľ|éeŽ}ńEłëiHČM7qĂ#S ŠÂŕćF®5•¤Ď>ăŔ˨(+ońůÖ8p 1÷ßGŹ1ýý(+.&˙Ü9NnŮÂ÷­ (§îŔŔôŔĹĆbňń¦8/Źóßd˙pfĎ%$DDDDÚKQ~6…9UC}Ł˘˘i¶ë‚<ňčT–ýc)S§N%::ZAiG…™™|żü#†<ö(}FŹ®•đ dÂâżcéŢ˝Ö1=‰ĽăúŚĂ'Ź?΅ÇmŁ5őT”•sÝČ‘Üţöqrqql÷ ďËđgž&pŔőlxáż.»ť>ŁGsëďgתŰčęŠx8ţááôűÉOXőđTlééŽýQwÝĹŹ~÷ŰZǸűůqÝ-#ąnäů÷ŰďppĹ %$DDDDÚC–őăçŽ>ńśt f# ŹrهŹčÇ[o˝Ą \%'·neČcŹűĐ_<†Ą{wŇbŰś9d;@@d$7?˙ÝúőcŘSO±ć©§Ř»d){—,­weŚQż}ąŮőÔQYÁ_É‘O?e˙—šŠ›§cÇ2âWĎĐgôhzÝÇémŰ[tľŐ~đä8ą¸°wÉR­Z…í &7‚ßŔ_‰gP?xň 6żú^!ÁÜňŇ‹|·x1G>]Ť-=s@·ÝĆŤÓ'îŮgIýćrNźîT÷VŮ‘N)ďlŐX___M>'"MjŕŽĚ“rŐ?ĂSSp÷÷«µÝ+$„R›ŤMŻţ?Î˙=e……”rn˙~¶Ľţ4YkęqvuĺÜlťý{rNť¦˘¬śÂěl|đűţďDÜ>ö˛Űńş8ďўŋÉ?{–О2J 
lśţ÷żůâĹ)ÎËĂđź{÷މ¸Ť|»pßĚ˙+y©©”—”ć ß-^Ěî˙ţoś .ô˙éO;Ý} wŁt:EůŮd§&{É·k""P5ie¦ő ń?ű%CűşáçárÍ]cbb"ďľű. ą>IIxŘ:Íą—ÚpuŻ˝2ĘęéO4xLÖ‰UÇÍMÖßÚz®ü¸ŢíÇ6~Éŕ©Ó˝żËn'űÔ)ü#"őňKlźű.öŚ Çľô‡řűč1µĘ‡Ü8€¤uëęmçčçëąé‰'č9ä†ńÚúgfIĚJHȵçÔ®Ď?kF|©)?3•¤bĎ9 €ó…üés˘Łqóô 87·Î>“Ź7îťHČM7⻯/ÎC­ůšŁ5őd&'×»=7Ĺ PkCKŰŮüÚkŚ˙ë_ żőVúĆÇ“~či»v‘úí.ÎěÝKeyy­ňž={đĐgë=gŻŽ‘ČĘbHϞܧ„„\[rÎś ŰšTÍŚPăŹBézRRRX±b鹌îd¦těóőő%¤tҤ¶€‹“çź;W§óýÓ÷˙K·n­Şżµő”Úíőn/+ŞZ™ĹŕćvŮíd$&ńá˝t˙ýôŤCŕŔČ ?˙9öŚLľ]¸€#ź|ę(é(’†›1r¤ŁQBBDDD:ŤŇ"»ct„Édbܸq ŠHg·ŰIľäŰl“ÉÄ1c?~ĽÔAő3 Îr•ĂźyK·näź=Ë7óçsvß~Šrs©(-Ą˘Ľś'v}۬ú[[Ź‹›e……u;Đ&SŐďŁÂ˘VµS™ÉÎyóŘ9oޡˇ„Nßř1ôĽáFýö·¸¸9¸rĺŶ 1zxđţ¨QRPpMÝJHH§qj×zÇRźcĆŚŃč©#<<śGyDź_ßľDßuP5˙AMŐó%¬}úirNŐ^1ÂŁGŹf·ŃÚzüúô!ýС:Ű}ző ŕüą6;ß\«•\«•+VĐ˙§?ĺ–—_bĐ”ű ‰\«•nýúás]/Ňş¦î­˛!"""ťBĘŢÍdśŘçčpč›OŔd2a2™>}:3gÎT2˘óéŐ‹±sćŕěęĘńŤëĚŐŕb4`KżPçŘ˙TVVud uż[Ż9gCkę¸~„z·‡ßv+€#Yq9íÄżůŻ˙śî®ŻsLň_Ôz$ő›ŞQ±ů¤‚""Ž„Ä{ď˝Ç{ď˝ÇŕÁČŐb!pŔFüú×ÜűŹ˙Ă+$\«•­łf×)›}şj”Á°_>…ÉÇŁ‘î×_Ďío˙ŁĹĽ´4 ę‘gWWŕ?ó:ôŤłÁ€»źßeŐS­˘¬śŕ‡2â׿Ʒwo &7<‚4ĺ~ÝwIë>»ěó'ĚüxÖ,z݇ŃĂ'<‚ţôÓd;ć8źC«VQVTDßř1Äżůޡˇ8 üąţg÷pŰ[żÇ;, هG§»7ś*+/¦lDD:©iÓ¦3аÁŁ‘kHŐśë##L&Ď=÷aaa ŽtyŽe?ˇC,űůÄî]Í*—¶k_ĽřE99uöEÜ~;ńoľQg{Áąóüóç?gŘÓż$rěXÇöCoäîE‹ę,yąń·żkq=ΦíÜIIA_ľô2cçÎĹŮPwĄŚÄ5kŘňÚë—}ľžAAÜłt î~~őƧĽ¸µO?Ă™ďľslëOü›o48˘ăBb"«§O§¤,ý:ä±Ç¸iú´&Ëi érÎśŕÔ®ĎsF(!"Ň9•cĎĚŕÜţýý|=Ö;,›Ľ~=&o/Nš„gĎžŘ32Iýöv-ú¶ôtv-úľ˝{ăß·Żc…Ž­żźÍ¨—_¦[ż~T”•“sęÔeŐăb¬Z9Ł´°”„Ö<ů$C}”ný˘1¸ą‘“’‘O?ĺŕŠ•­:ßüłgYq˙b&Oćş[FâŃ˝;.F#¶Ś ÎěŮĂŢ˙ý?˛Oś¨—ă7’}ň$±>@đСýý©(+#űÔ)Žmř‚}DEii‡x˝ŹF„łoďwl}űmfÎśŮhYŤ‘NO#$D®-EůŮX÷mqŚŠ€Ş9#ž|ňI,‹$"rQG!!U »—áááM&$4BBDDD®ş˘ülňÎź"+ĺŮÖDÇv-Ý'"Ұččh-ZÄ‚ˇ7*Ň))!!"׌Ľó§°îۢ@tEůŮŰr(+.t<–QÓ°aĂ7nśfËąF)!!"׌üó§Č?JéÄ|}}‰ŤŤ%>>^‰‘kś"Ňé“vqI%é|ď_BCC‰ŤŤŐ„•"""]"Ňé˝ňĘ+ ‚H'ă¬H{Ó ‘HLLdÁ‚Ő»˙Î;ďltEęejÓśe˛DDDD:;%$DD®q7ndßľ}WĄmłŮĚĂ?ŚĹbąfâyôčŃ“ÇŇ ůüű¬Vµ‘e+sÔ‘“žĆ÷ŰÖág10qâDͱ "")))¬X±‚3§¦b.,TPäŞ3âDhhh“e•ąĆ­\ąň޶oµZ‰ŽŽľ&c3ŞÎ6gź0Îç—¶čKąyř8ęČÉ. óĚI2»Ý®ZDDěv;ÉÉÉŕĺI™‹žĆ—Ž!$5Ť!·Źĺ¦É“›,«„„Ha4{ăćáÓníĺ§źľćc6xt»#"""r-RBBD¤‹č>¸];Ă;–^›«źDFFRP\αô"ÝT""""­ „„H DGGăÔ›’C9 †H+čA#iwJHH»SBBDDDDDDDÚť"""""""Ň&RC‚Ywô(Ë—/o˛¬&µ‘6awwÇn+ŔĂjm˛¬"""ĹŻý~<•ŰřŞ€Chh(3fĚŕÓéÓ1Ű ét”iŐ«WłnÝ:†?üúoĎŐdƧg˘Ł|©Ĺb±Í–›‚!ť’ć‘v§„„´;%$DDDDDDD¤Ý)!!"""""""íN iwJHt`EůŮX÷maÍš5ddd( ""â`łŮHJJ"ßĂ2guí¤óŃ]+""Ňĺg“ş k×®UBBDDj±Z­Ěť;—ăQ‘ŘÍî t!©iÜŢ7śI“&5YV ićÂB‚˝Ľ k˛¬Aái>Âz÷%ŰV¦`´‚"""-GźCŮp(GÁi=˛!"""""""íN iwJHH»SBBDDDDDDDÚ„ÝÝť3ůů¤¤¤4YV i©!Á|~,™Ź>ú¨É˛ZeCDD¤3yú3ŠŘP  8ÍfÂĂĂ9óÝwĘ+ét”iíŰ·łu[Y¶2Üţó+ŢžÉÓ—°ÁŁ?"P … IDATÁ‘ZÂÂÂ9s& †Ţ¨`H§¤„„H dffrúÄ1BDDD¤•4‡„´;%$DDDDDDD¤Ý)!!"""""""íN iwJHH»SBBDD¤Ë9s‚K_aÚ´i$&&* ""â’’Â;ďĽCrDvwwD:˙Ě,b{0bÄ&ËjŮO‘NČn·“śś ^ž”ąč»féüł˛Ňł'7ĹĹ5YVw­´;Ťi#FŕÜ›]'  ‘VPBBDD¤(uő$ą8GÁi=˛!"""""""íN iwJHH›(sn~šA ‘NČl6;~.5ş) rŐ•9;spp,ďďýŽíŰ·7Y^ ‘N(,, ___\ťŐ­“Ž!+ Ŕńshhh“ĺµĘ†H dddp2ĺąg đę}ĹŰóéهáżÎÔ ľÔ1uęTşy{łî±_`S8ä*˛»»s.¨ÁÁÁ„……5yŚRi"""-ŔŇEó8üĹbCDD®şččhü‚¸ůůç ąjěî Â`Ŕd21uęÔf§„„HçääTç?Á@·nÝ:t(Ď<ó ;vě¸"mőŐWôďߣŃH÷îÝŻÚµ‹tv}~ô#~öŹ˙ĂŐbV0¤]ĺyXÉ€‰'6kt(!!"ő(//'##={ö0oŢ«ŐÚ&íĄĄĄ‘žžŔÖ­[),,ěqŘłgŹné´şEG3ńĂ0iÉśîÖ˘eEš+×Ç‡Ś‹É/“ÉÄôéÓ[”Ś%$D¤ŐŹlŚ=š÷Ţ{ŹË™3g˙üó:evîÜÉ˝÷ŢKPPFŁ‘ŔŔ@îľűnľúę«:e‡ Fż~ý˙®o.‡ .đüóĎÓż, FŁ‘°°0|đA>\ďy65'Dcűßzë-śśś8xđ`­˛Ë—/× """ťNhh(111T śëÄÁÁ±śî†ÝÝ]’ËND yě1Xł†›¦Oăąçžă©§žÂb±\V}JHHłôčѧź~€U«VŐÚ·dÉnľůf>ţřcÎť;çxäcőęŐŚ=šżüĺ/-jËjµ2xđ`ć̙Ñ#G°Űí”––bµZůÇ?ţÁСCٵk—^éŠňł±îŰš5kČČČP@DD¤Yxę©§1cáááŽíŮíߏÄč(Ň»uÓ¨ iPŽ·©!ÁXk%"L^^UŰ/3ˇ„„´ŘÝwß Ŕ¶mŰŰŽ?ÎôéÓxńĹINN¦°°'N0kÖ, 3fĚ ))ÉqĚÎť;©¬¬tü»zîŠjŻżţ:iiiÜtÓM$$$źźO~~>۶mcČ!ňŇK/µéµýć7ż©÷śÚę‘Ö$$R÷oaíÚµJHH‹EGG3sćLfĚÁ°aĂţóűĹbáLX(%nn ’ÔJBśîĆA1ś '#0ŢĎ<ÍCëÖÖJD´B."ÍŐ·o_Îź?ďŘ6ţ|Š‹‹™5kV­$AďŢ˝y饗¨¨¨ŕwżűűŰß3gNłÚ9~ü8žžž,]ş”ččhÇö¸¸8Ţ˙}bccŮąs§^‘$&˘ŁŁ7nűöícăĆŤ<ôß#qÍZN~ő¶ăI×Pl4RŕáA®ŹžŽŐ2Ş™L& ={^±ö•‘fóđđŔfł9¶mŢĽąę—ŮCŐ{Ě”)SřÝď~ÇÖ­[›ÝNuťőąţúë(((Đ "WEdd$ĹĺK/R0DD¤Ó  >>žřřx222 [t4?śů<I\ł–3{öuě©!Á”ÝđČĎÇŁ sYMÚF¦źÖŢ×Ő»/&&†#F\ÖD•JHČ‘ťť €źźźcŰ©S§€Ş‰“süřńµ•‘‘ÁüůóŮ´iV«• 
.PZZJYY™^ąŞ˘ŁŁńęMɡCDD:µ€€€Z˙îM·‹ŁS‹ňňůâ‹””•‘çë€sYůU‰ K~>îv;†Š ˛ę5ň‡DDFbÝ»¨ Kll,‘‘‘­žB is{/~`………9¶5w¤B~~~łŰ9yň$7ß|3gÎśQĐEDDD®‚rn:”¤¤$Ç—Ryľ>AUY›ŤčÄ$¬Č÷đ Äh¤ĐěNˇ»›—'=­©ôvr" *’ŕ!CŚ"řơŽcě«W[ëďűö¤„„4[őęŁFŤrlóđđ 77—ěěl|||Ú¤ť^x3gÎĐ«W/fĎžM\\ţţţŤF ...-®ł¤¤D/ H3Y,yä jäjRRGŹ­• °tď΀A±d=ĘŮ‹_^Ő”é燱¤—ňr=ňцň<,Ř=˝Ş’nFJś4čÎ;xč©§¬güřńWő:”‘f9xđ K—.Şć…¨Ξ={HJJâ?řA›´U=‡ÄúőëkMj púôéŹsrr˘˛˛’˘˘"L&S­}‰‰‰zEDDD.C@@ÄĹĹUó‰Y­VŽ=ŠŮlć‡ńńޞÉ?{–ڤŁdMât~>Ů5–u-.ĆX\‚{ˇCy&» —ň =ţQCţĹyŰ‹‰»Ż/'şwo°ŽđđpBCC‰ŠŠęĐת„„4)))‰»ďľ›’’î˝÷^ äŘĎž={xçťwXąreťcׯ_Ď3Ď<ĂĉyóÍ7›Ő^qq1ÁÁÁuö˝öÚkŽÄCII FŁŃ±ĎÓÓ“ĽĽ<öîÝËđáĂk÷Ç?ţ±E×\VV†Á ŹH‘KY,ÇŞ—Şž‡˘ĎŹ~Ŕ§Ó¦ŐÚ_ęćF©›6/ĎZŰc÷|×`{y*\\p)ŻęśwöŃŐŁj&ęĺ0¸˘’€d$nžž‚ŃÓnŃŃdddđňË/Śżż?ˇˇˇŽ˙.ťD é|”yy>|•+W˛hŃ"l6ááá,Z´¨VąéÓ§3oŢ<>ţřc¦L™Â«ŻľJŻ^˝ČĚĚä“O>áżţëżČĎĎ'77·ŮmGEEńÝwßńâ‹/ňꫯâááÁxë­·pvv¦Oź>?~śU«V1aÂÜ.~x8íŰ·óěłĎ˛hŃ"˘˘˘¸páż˙ýďٱcľľľµ†ÖÇl6c·ŰYąr%÷ÜsŮŮŮꆹ sçÎĹjµ’™™Iff&V«»ÝNjj*EE˙Y±jʧźć,i{öPśźOćŃŁä§śťlĂą¬ w{!gĎŕU`«·L±ŃH¶ż“çëZ\ŚVVűÓ»w§˘ĆăĂvwwĘ/yś¸ÜĹąÁy5\-fś""9×ČőTë9¨ówygäTYYY©·‰H×ĺääÔ¬rŁGŹfůňĺtëÖ­Îľ+VđŔPZZZď±7Üp›7oĆŰۻ޶/ýZ¶l<đ@ťzBCCIHHŕ7żů Ë–-slŻ>~ůňĺÜwß}uŽ3Ť¬[·ŽGy„ÔÔT***prrŞ·ýQŁFŐY˘´łLN»řÍDpĚ(ÂŹn·vw,}€3fÔű Jgv6· í´ĘFi‘[Ö9nŕKhhh»Íz-""ŇRRR°ŰíŤţ­0ţ|8Đd]·÷ 'ŘËë?I‹ €ŚĘJvą87Y‡oEĂhřďăťTÖzĄ!Ż<ö ÇĎţQ‘jś×ęŐ«Y·n&“‰G‚Á˙bÂ$22Ňń·ďµţ{_#$D¤^îîîôčѸ¸8¦L™Âí·ßŢ`ى'ŇżŢyç¶lŮÂąsçpuu%::šÉ“'óôÓO;F14Ç”)SČĘĘbŢĽyś:uŠ=zĎkŻ˝FHHŻľú*GŽáŕÁµfžś´´´zëtwwgëÖ­Üx㍵¶˙éOâŮgźĄľŹ•!C†°mŰ6L&S­sę”â‰'X¸p!qqqlذ‹ĹÂ’%Kxě±Çę='''ţüç?óË_ţ˛Öö’’ÜÜÜčÝ»7űŰ߸ăŽ;ęMľ¬\ą’źýěg<ňČ#,Y˛¤NWWWVŻ^Íرcë=çŽlÚ´iÇŚ"lđčvkwÇŇW1cŃŃŃ×Ôűęlnq»­˛Qy†Sß~N —+“&M",,Ll"""rMĐ ‘®đFwvćé§źćŃGĺČ‘#”””pţüyćÎť‹‹‹ ˙üç?Y·nťŁü믿NZZ7Ýt äç瓟źĎ¶mŰ2d………ĽôŇKµÚ8tčĎ?˙<ůóç“‘‘ÍfcëÖ­DGGłgĎŢ|óÍ&Ďő•W^aáÂ…ÜxăŤ|öŮgX,Ž?ÎôéÓxńĹINN¦°°'N0kÖ, 3fĚ ))©NŞFJ<ţřăüâż %%…ŇŇRľ˙ţ{nşé&ţň—ż°eË–,Y‚‹‹ ďĽóçĎźÇfł±˙~¦M›ĆôéÓqvÖÇfW—ČW_~Žuß–viĎĂż'Ć>ĘĚ™3•Ś%$D¤s)))ařđá,Z´ččh\]]éŢ˝;3fĚŕů矨őčÂńăÇńôôdéŇĄ ><<<‹‹ăý÷ߪź¨iÁ‚”——ó›ßü†'ź|Ěf3#GŽäĂ?Äl67ůŘĂĽyóxăŤ7‰‰aÆ xyy0ţ|Š‹‹yýő×™={6áááL&z÷îÍK/˝Ä+ŻĽBYYűŰßjŐW=#33“‘#G2ţ|BCC1 0€ůóç°˙~ţ÷˙€gź}–çž{ŽîÝ»c6›‰‰‰aŢĽyŚ9’ŠŠ ÝP]ÜŃŁGůęËĎIÝżEÁQBBDšňä“OÖ»}âĉě޽۱móćÍäĺĺŐ;Ěľz>†‚‚‚ZŰ·nÝ Ŕ˝÷Ţ[çŘŘXl6[s=|řá‡üęWż"::š/żü__ßZçđĐCŐ{ě”)SjťC}žy景%77€oľů€űŢ:ŞGiHë‘®!&&¦Ţíś={¶ÖöŚŚ ćϟϦM›°Z­\¸pŇŇRĘĘĘę­çäÉ“ôíŰ·Ĺç¶aĂ~řa\\\Xż~=Ý»wݵ˙Ô©S„††6ZĎńăÇÜYg›»»;đźů ¬Vk­\jŔ€ş‘DDDDDÚ"]„§§g˝ŰÍf3………µ’ 7ß|3gÎśivýŐÇ»ąąµřÜîąçÇd“ ,ŕ­·ŢŞµ˙ŇŃ ÉĎĎopꇇG“ÇŰíöZ1ą”ĹbŃŤ$""""ŇFôȆHQ3áP_'Ľfgű…^ŕĚ™3ôęŐ‹eË–qęÔ)ňóó)..®w•‹šÇgff¶řÜů裏pssăí·ßfÓ¦Mő&˛łł©¬¬lđż†Fo4Wő ĹŞúŃi=%$DşĂ‡×»˝zeŠšŹCTĎٰ~ýzîż˙~zőę…‡‡FŁŃńXĂĄz÷î T­@ĐR;wîdâĉüţ÷ż§˘˘‚|ŚŚ ÇţđđđZçzĄôěŮ€'NԻ߾}ş‘DDDDDÚ"]ĢE‹ęÝľ|ůrÇĹĹĹ×)˙ÚkŻ9VŻ())ql9r$K—.­sĚţýűqwwgřđáőžC·nÝřőŻÍm·ÝĆŮłgyä‘Gűăăăxçťwę=~ýúőDFFňŰßţ¶U12d+V¬¨w˙Â… u#I»+Č<ĂÁőçťwŢ!%%E‘k†"]€Á``óćÍ<÷Üs>|»ÝΩS§;w.úÓź€Ú+XDEEđâ‹/’‘‘AQQß~ű-&L //Ź>}ú°jŐ*GňâńÇÇŮٙŋ3{öl.\¸€ÝngëÖ­Üwß}1bÄFĎÓÉɉ%K–Đ­[7Ö®]Ë{ď˝T­na6›ůř㏙2e ÉÉÉ”””pöěY,XŔĉINNnő#“'OŕřůË_ČČČŔn·sŕŔ¦OźÎ®]».kŽ ‘Ö(+."˙ü)’““ŹX‰\ ś*«§—‘kNii)FŁooo>üđCĆŹ_ď< S§NeńâĹŽ/[¶Śx NąĐĐPřÍo~òeËŰ«?FŢ~űm^xá…zĎĄ_ż~l۶ ??żZ šÇW[»v-ăĆŤĂÍÍŤť;wËŠ+xŕ“_^ę†n`óćÍx{{×IrÔ×FCűďşë.Ö­[W§śÁ`ŕ“O>aĘ”)äććRQQá8¶Ł›6mÁ1Ł<şÝÚݱôfĚQﲝŐęŐ«÷Čđ‡_żâíĺś9Á‘/—\“±‘®M#$D®aEEE@Ő„“cÇŽeăĆŤÄÇÇăëë‹Éd"&&†÷Ţ{Ź˙ůź˙©uÜ”)Sřóź˙LDD®®®„††ňČ#ʰ}űvBBBxőŐWąá†0ŤŽůfÎśÉçźέ·ŢŠŻŻ/®®®ôéÓ‡^xť;wÖJF4ć®»îâ©§ž˘¸¸űî»›ÍĆĉůî»ďxřᇠĂh4b±X2dożý6 u’—cŐŞUĚž=›ţýűc2™đńńaĚ1lذ;ďĽÓ±ZIulĄë1b=ţKúýxŞ‚!"""Ň !Ń]:1ŕŃŁGë”±Űí N^ŘĚfs­I«ůűűăďď_ëßzŃDZA#$ÚŢŮÜb6Ęi—¶4BBDD¤6›ÍFjjj­ľKÍDDD4ú;łˇąÉ.őüóĎ7¸oűöí¤¦¦ÖY2^}šć3(ť_FF†c©ĹęäBÍ„Bff&YYYîĽ÷ďßßâc"""€Ú ŤšořęąDDDDD¤óőiěv;‘‘‘µ–¤ŻiăĆŤ¬\ą˛ÉúĆÜ|3>ĹĹ50ÇXrrrłÎëÔ×_7¸ďřţýl?x°É:zčˇěرwwwG˙¦+ői”čŞ3€V«•ÂÂB¬V+v»ýŠ&|\]qmÁóń&¶ňr xľż>9ĄĄ”¶p€NÍŽĆŐ‰‹ĐĐPĚf3!!!Íf%,DDDDD®’””Ž=Z«OcµZë< ű‹űď'üşë(ĘÉq$ Îź§¬¨˘ěěfµeÝ·Ź¤FF|w3?]\đuuuü»¤˘‚싫ɝ޶­Á: óň0»¸`//oô\Îý5űj,+ďÓ«WUgÜdâ\a!k×®­÷¸ŕŕ`DZ!!!ÄĹĹ)!!Wö š™™éH>Říöfgî.evqÁâ ŃŃŮŮńfó©ńf3:;ă×V (./wĽůJk|d—–Rrń 
ˇĆö†őĹĎĎĎG˛"22’ł°"""""Ňz}ôÇŽk˛ÜńŤ±ą»×»Ď©´”řůa1ü§+ëa0ŕQŁŹÓ? jőőD{yíĺŐdźĆ٬ŚÜÉ‘š?ź+,l°ţ´´4Gź¦_DDŁ ‰””üýý;]źF ‰«$)) «ŐŠŐj%33łE‰‡ęŃ ®®X\\° X †—\¸n..ô¸äĂ'´‰7UÍ7|úĹěęů‹˙ŻoFVVYYYub^ť¨ŠŠ"$$„ĐĐP=ë%""""RŹšŁ¸kökŢ{ď=J ±Ą§;F5äś>MQn.¦ôô:}šę/O›Ű§ńpumqňˇ#ôiŇĂÝťű{őŞ·OSýelNi).çĎłuöl\ŚF<1Lxâ›·7łfÍę”}%$ÚAFFGŹĹjµ’””äČt5'éh29F5řŤ¸]ő őżázăgc++#»¤ÄńhÉĄÉŠú&“‰¨¨(BCC‰ŚŚÔ#""""Ň%Ą¤¤ššęřbµˇ>ͦ·ŢÂPQQďľ>Dyy©Os™}€ň’Nj̋ý–šŹÍ7Ő§7nś]%‘””Äľ}ű]ĐŐÉ WWM&,ľFc§ĺĐůąąáçćVg´Eu&2§´”ě’ÇĎŐŠŠŠŘż­ů*‚˝z9ţ«–Hhhh»ÎCˇ„D+“űöíkp„«“ÝM¦Ş7ëĹočĄs¨QŃ×Ó¨I‘^TDvI ©……ŽŚcÍ+W®$88#F(9!""""ťŞOcµZyĺ•WČHJ"çbÂVc·ëÍf"L&ői:aźćRąV+ąV+§·mĂĹhħW/"#y÷ŻŞV,ŚŤŤm—>Ť-”’’¦M›LBt3 µX”€¸Ć¸ą¸j±j±ăëëHP¤ÖA‘––ĆĘ•+ɉřřx ¤Wç»”·¸6•—”™śĚľ}űŰ’““INNfĺĘ•DDD0bÄFڎ„ÄŐ”@BBB˝«a»»âîN٬áJ]0AUáRíöZŁ'ŇŇŇXşt)&“‰ŘŘXĆŚCXX‚'""""íÎfł±˙~Ö¬YCVVV­}®NN„ÍT”–‚ľTí’B-n78QP@jaˇă ×ęäÄG}Ä#3fL›ŽšPB˘‰úŢ´Áîîô©Ń!•®­z8TŚŻ/ĄĄś((ŕ„͆˝Ľś˘˘"vîÜÉÎť;‰`âĉJLH»Ůľ};+V¬¨3»ú‹ŐęÇ”E}?77†Rő…kÍäDQQ›7o¦˛˛’É“'+!q5fúX,ôńđĐ%i‡«+1ľľÄřúbµŮ8ał‘VXTeg͚ŰaĂ7nść™‘+˙÷©››#QݧŃň›ŇšÉ «ÍFb^JJ號Oö©Sř^w]›´Ł„Ä%RRRX˛dI­•2Ě.. ôöVćPZ¬ú±Ž‚ŇRľĎÍĺ¤Íŕ1q×]w1zôhÍ1!" *ĘĎćÂń}¬É´0|řp%2ED¤ŮJ I۵‹śmŰôđŔ×hTźF.»OS\^Nĺ… řŕĽCCéőö:1ˇ„D kÖ¬aíÚµJDH›ópuex@˝˝k%&Ö®]ËŢ˝{™:uŞă‘zĺg“ş ©ű«f˝VBBDDšăěţý˙ňKĘKJęﯠH«ÔQ“kµrŕ8ľńń¸^˛ähs9+¬UĽ,^ĽŘ‘Śpurb€—? Q2BÚTubb|Ďžt3ŞÉ/çĚ™ĂŢ˝{ i•ŇÂB®\ÉŃuëÉ‘+ĺü÷ßóÍüů¤9‹)‹$ě IDAT/ľHJJJ‹ŽWB3g;wîŔÇŐ•1Äřú*0rĹx¸şňă  n¸xź±páB¶oß®ŕtpţţţ„őî‹g÷^ †t6›Ť×_ť-_~ÉţeËȬge@‘+ĄĽ¤„?˙ĺ/dee1gÎś%%ş|BbůňĺŽů"‚ÝÝź–ş‘víĺĹčîÝqur`ĹŠ-Î*ŠHűŠ‹‹ăçOüŠcU0DD¤CX±biii,˙řcŽź>­€H»‹ňňŞľhť3g¶‹Ź¨7ĄK'$ٲe P52â–îÝ5Ó¬´»îîü°[7ÇxĹŠ Š4ËŢ˝{Ł˝ÝÝéq™Ďň‹´F_OO~ŕç×â>M—NHlÚ´ ¨š3bL` î"ąjz¸»3ŕbV199™ÄÄDEDDDDšÝ§1»¸0LWĘUÔ×ÓÓѧٹs'MÓe6›Ťĺé©‘rŐEyy9Ýرc‡"""""ŤĘČČ ůâ|Ń^^ęÓH‡čÓTKHHh˛|—MHX­VÇĎÝM&Ý9rŐąą¸ŕăęęřĺ"""""Ňš3V˙)ŇQú45űÜ Ń*"""""""Ň6*+°ŰíM5(ZSZŞÉ_¤ĂÜ‹""5™<} ŽEl¨…€€DDDjţ^°••) Ň1ú4-¸•’ňňčm±č™+ąŞŽççSz1›("×öíŰŮş-,[n˙ůoĎäéKŘŕŃŚˇÉ—ED¤¶šŹlĚÍĄŻ§§‚"WŐělÇĎn˛ĽŮlĺĺ|—•Ą@ČU“U\Ěw5ŢĽ"ŇqeffrúÄ1ňĎźR0DD¤ĂčŻ6¤ôićĺh±ßä1JH\tŇngÇ… „\•7î¦óç5:BDDDD.[yj*ÎFŁ!WµOŕ\VÎ-˝®kÖqJH\ T%%ľNO§¸Ľ\A‘vaµŮj%#ŞďE‘–Č9y’ó‡))!íîxţ˙oďÎĂŁŞďýż“™Ě>Ů& Y&ŤŐ° !* ÄĄ˛h­µUŠÚRĘ˝·Z­Uéí˝ŠôÖź m5ÖJ\+ŕ "bXDHŘ$ „=!“möÉĚä÷ÇdĆ Ů·I&yżž§ŹÍ™3gÎ|rNĎ{ľç|µn=MLq1”bqĎzq–ĎQ0Ië@ËŚF|VY‰R˝ž…ˇAc¶Ůp˘®‡jk]'®úR1$wDDDDÔ7WÎśEőąsůűłäGjkq¬®Î­§ llěńóHđµYź_€ Z Ç=%ŐÖb_Utśő€X‘N‡ŹËË‘ŻŐ:Ž?« c/\„Š÷1!"""˘~şrć,~řŕCZopy¦ˇ= šŔ@€źŮŚÄÎ÷ş§á,ÎBŘí˝| (;vˇ5f3>®¨Ŕ8™ Sˇđócˇ¨ĎŠt:śih€ľÍ%Aţő ).†Đngh@XššűĆNX¦NÁ~~8ÓŘČž†”ŻH‹^Ăî=ŹĐęš>ő4 $®ŘŘĹ™3(W«Qâ¸Sí%— „‰ĹHR*ˇ–ËY(ę]s3.éő(ŇéÜ‚?łQĄe˝ÎDDŁSCEÎąGŢÖŻ_Źääd…zDsĺ éÖÓDKĄ'—ł§ˇ3Űl ® ÂW @Ţî=°´Î¨QYŐçm3č¨(­Ł%ĆTV˘*"ÂLÔͨ1›!ŻŻG´L†qr9‚{xł]'ląŃ2eFŁŰc~f3ĆTTňň """"t•UÖÔąő4eF#ĘŚFČëë§P`ś\ÎQÔiOS¤ÓAoµbĹĉ0h4(řŕĂnź[ŹÜś“8ř ظq#‰ľ[,®`âJX(ęT!° ĐŰlČ×j‘ŻŐ"P(DśB0‰„áÄ(?akL&Wqőž˛¦&¨4u "hČzšŞ4şzš3ŤŤ8ÓŘôČH†ä !ŞŤF\jťôÁéë?”ŃÝ $zxG—•#ş¬šŕ`hTÁ0´Ţą¶ÁjĹɆ€\ @´L†0±a‰kX ŤLşćf” ¨1›ŰŤ„7« ÖÔ"´ć Ä F4B¤§§cÖő7ăós , ©äädlßľŰfÎęq0a--Ec` ®„‡Á$“Anµ"88đńq Á§Ń×Ó”Ť¨1›Ű=îg6# ˇŇúC@Uçř–Ű,ˇ10uŞ`d2p9ab1Â%ÇĄRoś¬5f3Ş[OÖ¶÷„hB44  ˇ÷‡ """˘aGh·»ő46ą§Nbc5k$ţţ05Ôľ×h ŘÓŚPE:Î^D9{š M-üuúÁ=ů+č±Ĺ‚°š„ŐԸ ťB¦ @×:Î{N8 …—J$!L,ć°¨aÎ<Ô[,¨6™Ú]†áäL ZC""""ňŞž¦­ĆË—Ńxů˛ëç¨E7 @§s[ÇůĄk ź‚D"ö4^ĐÓŠDnŁ÷ýd2řŠDĐ_ąß’ 0 ­ţ B0Äpšr4B§TşFOŽË;ZGO€źŹ‚D"×˙äSÇ! 
kn†Ţju…şćf4X­ť®ďkµAˇŐ:BťŽ—cŃTxä($ccÝzš«żtőóńA´L†ą!!,Ř0ęię-×îE±cŁTŔPW‡ęÓgÜB'_D˘!ëiH ťŢ•*Y}}aIˇW*ˇU(\÷ž€ć––v'4ŕI!.‘¸B‹«S-ę˝:łz« ÍÍŽÖfC˝ĹŇéČ×Ył ­RŁ ­˛Aş~Šh8‘ŤH>ź«Ż/tJ%tJ Ri»ž¦€$0öŰ*Ł>{švQ«Ĺ%˝ľĂ{?´uú»c¨++ďôńˇü‚•Ä`Řnw­Ë R)tJ%,"żv'4ŕIÖtëja­3y‰Důú"ĐĎĎń_žÜ¨3›Ńl·»Bgč ·Z;ĽßCgáÔ`„Ôh€Ôŕ „v;d""""Ő=M`cŁŰĺÉ©ĆÖľĆçR1N~s ň÷GؤIđŹŠ‚H.Ż@C%%n_:{šp‰ÄýgŽwő4Î>¦ŁiY}E"drm6Ôt0‹_ŰžF®ŐBfľ_¨22Ł±Ý·ěf‘F©F™F© Vo» €+¤č*sžĐ"__‰DąPy›Ŕ çÉxő{·´†`±ŮşĽĽ˘«ŕÁĎl†Ěh„ČŇ ‰AďŃkĄzüµXeX,Âýý k3d–h8ô4WOkoijBŮ‘#®ź­ľľhž–ŇŁžćžŘŘN_ŻTŻGsK‹[Oă͆stCw=MD` ĄRšš`¨Ő ćÜ9×l(©˛č(ŻîiH b‹b‹ĄÝMÍ"Ě"?drŘľĐ*“L»°ă@ˇí‰]ÖĂË ś—‰\­m¨1Ú† WëÉĺ˝!1ŕkµBli†ČâH }mVDÔ/yyyČ9›‡ŇÔ)7 úë)T‘|ËX3/śĹ'""7%%%ČĘĘBEB˘ĘʆĺĄĹB»‰?ś‡UŕŰ®§iű¬¬© W !V(!’Ë wü»'‹á\ą‚jˇŰžfNH‚[ż ˝š®ąŮt%T,Ć.ÂŽ3 ¨6™şíĹ®Xś#@ÓÔÔíĄpţ뎂ˇPbá…a÷;—ŤPFD@­V3)A…ł‰Ž¸ęqTęvrpťŕvˇĐí4]q^&Ň‘˛aö‡Í×jÄ o­Ź#lŕ d#/ł ˘ASPP€Żżü<HuĆ`0 °°đWÂÚÚ GΠ¤łž¦I!‡ĐfÇ•.úmB<ĐÁň«{›Paa‚P,Ať¶ g**şÝß”°pŚ ęî7yü:ÝpuOSVYŐé6"*«:ěŰFŁ„s¤ĹŹŘÜ‘÷ö4˝ýÂŇ_§Gʉ“ýŢŹIgĎń—1@|Y"""""""ň4DDDDDDDäq $ČăH c:MÎ~ö:^|ńE”””° DDD4b0 ""Ƭf´ŐĹ(,,„ˇ›ů׉†ZYtöŕťwŢév]βADDDDDDD • ×AQZÚíş $z!11:ł jL, )µZŤőë×ăߏ<™ÁČ‚×a ADDÔ ÉÉÉËą†”\.Grr2čô,y%ŢC‚<Žy """""""ň8DDDDDDDäq $Čă8Ë ˝^ʬ¬,„„„`ůňĺ,QČÇ`ÂŤk°drÔj5 BDDnźµĘĘĘ U( 5 ´ŰYň* $hĐţÜĽy3ĘËËsçÎEHH C^ďăŹ?ĆŢ˝{ÇőýĎúëůIdŚŚCrr8‹ODDnJKK±eË )qůůđçôź4 D—•#nůtĚXµŞŰuHŃ€+))ÁćÍ›a2™\ËjkkHŤp2ŁQţţ‰‰év]D4 ňňň°mŰ6·0‚čjĽ©% ěěllٲĹF„ŽOaQ¨C!ADb÷îÝŘłg@ŕ'Fě¬[ –âĘĹ\‡Úa ADý–™™‰ŁGŹp„o΀B‰†Š"‡:Ä@‚úěę™4dAáHşáH”A,Ńi6`¨ŻF~~˘ŁŁ!—ËY"""x "ę“ÚÚÚvaÄÄ›2F 0}]~ř"/˝ôJKKY"""Ö R)*´Z”””t». "ęµ’’<÷Üs®0"t| ®I_?‰ŚĹ!""""ĹʢŁđé…BěÚµ«ŰuHQŻäää`óćÍ®™4“ŻEüü;Y""""“ÉdŹŹ‡¬© B›ť!ŻĂ{HQŹeggcçÎť®źăćÝŽđ„é, Ť*éééuýÍřü\‹ADDC*&&7nĶ™łX ňJ $¨G®žI#qáÝŚŚca¨OHQ—ôz=˛˛˛:śÖ“h¨ĺĺ塬¬ sçÎĺLDD^†uŞŁi='Ţ”Á›WѰ±eËŔţýűq˙ý÷#99™E!ňĽ©%u¨¤¤Ä-ŚP†Ĺ2Ś ""˘a«®®[¶lÁ®]» ×ëY"/ŔDÔŽ3ŚpΤ:>…3i‘Wřꫯ››‹ 6 $$„!ĆH‘›ěěldeeąÂ¨© 3m C4D$Ę DM]µś¬‰ş:>&m=´5—áă'ćßL"/Ŕ@‚\öíۇwß}×ő3§ő$j/;;ż=Ś:˝“—<0čŻ'Q!fÚ"¤Ď gń‰ş ’"~ţť¨řá0üĂÇâÓ3uźŕĄdä¶<%%%ČĘĘBEB˘ĘĘ 3y ĐSię©óću». "Đ~ZOΤAÔ1ŤFËEX"˘abýúő8VÔŁŔ9ŃŃUk›ńq®)1 LŠ™ło ţJXĽ= Şş:ĚŚÄěÔÔn×e A4ĘéőzĽńĆ8uę@$ @Ң»F‘WHNNFQł5Zk»ÇšíŔńbJ4f,š±M;ŃpÂ3’hsNëé #dAáş|-Ă"""Q޵Íxďű+Č+ŐŕwżűvďŢ͢ !A4J•””`۶m¨««E'a|ę6­gVVd2NJDDDCŁŮĽýŢżQWW‡={ö ''kÖ¬ALL ‹C4DHŤByyyضm›G§ő,//gá‰hH…ĹO¶ş†új”——cÓ¦MX¶l–/_Îâ DŁLvv6vîÜéúy0gŇŹAPt¬ ?ÔěE„ĹOc!hTS¨"qMú:”ä|…ňÓ_GK ĺgT–€hôŘ˝{7öěŮŔ1“Fě¬[uZO?‰ É‹ĘÂőCCEÎąGŢpÜI>99™E!"ę§i‹“Ś‹Ůr´ŃâM-‰F‰ĚĚL·0bâÍFyB~~>®”Á¤­ďŐóśŁ%˘¦.t-Űłg233YT˘~(HÇk9'ń /t».GHŤpz˝۶msĚQ ÇLăSďŕLDDD4"ĽôŇK€¨© 3mQŻźßv´„Y×€éóobQ‰<„ŃV[[‹­[·şn() ÇÄ›2m& ˘Ń`ŢĽyPEŤĂńK:h„pŽ–h¨(BÎ!j­őź±px(ONNĆöí۱mć,ţÉ+1 ˇJJJ°yófŹÎ¤A4„„„ ŮO‰Bs‹AD4ÂFĆJë-xďű+ź€X•„…!$ $F śśěرĂF„'_‹¸k—˛0D^.++ 2™ ŃŃŃX˝zu§ë˝řâ‹=ÚŢoűŰNËÎÎĆ‘#GşÝĆÜąs‘ššÚác%%%ČĘĘęvÝ˝źwŢyeeeÝngĺĘ•ťŢ!ż§űŇŐűńtmâý TműXéim‡Ó±Âó°÷µőä±2šíŔüF¨Śź«ŮŤFĂ™8HQW^<5­' >‰2Čő˙ť—_iMVä”t~Éóž1ÝéjçŠ*{´˙đ±©;ŢNŮ%MʶŃÝű9ˇUĄ—şÝΩKhÜŻ}éęýxş¶ń~޶}¬ô´¶ĂéXáyŘűÚö±"ÎHçh‰ŞŁď˘ŕüÎÄAÄ@‚:˛k×.|őŐW3iŚO˝ŞŘ‰, ‘—QS˘©ęÇF E†SeúNźŁ ‹íѶ»ÚF˝]ŢŁíÔŰĺťnG×Ôł}éîý´ČĂ  łw»ťËM€¦źűŇŐűńtmâý TműXéim‡Ó±Âó°÷µĚcE˘ Bh\Ę ý-Ö6ÖŁŕüŽ™8rrr°fÍŽ– ę'ź––––ŃřĆóňň°eË@\~>üuz 4ä âađ÷G||<6nÜŘ«çfffâčŃŁ®0bâÍśI†Ô‘7ž¬_żÉÉÉ#ę˝U6šńů9ŢC‚h4Ńi*p1űCę«]ˆËh ŢÔ’Ľµ§ńeąĽ›^ŻÇłĎ>ë #dAáşü— #s&ލ© ]ËöěŮgź}%%%,Qđ’Ť!¶öű〼ݻqŕŹĎv»ţÜ_˙ )÷Ţ `t$ˇÎú´ŐbłÁ¬ŐB[Y‰ŞS§Qřůç¨>s¦WۻՋN‹úâb\ţö[ś{ď}XtŢ5Ť_II věŘáş®\‹¤îć´žD¨¶¶—JŞĐX©C@Ä8„h”‰™¶Á1É®ŃĺĺĺŘ´iÓŚ–¨­­Ĺ‘#GP Ťb‹Ĺë?÷÷¤ÇéhÝ‘ü™$ă‰abüâĹJşľŹŹ@€Ä%·ŚúZů"tÂLY˝ wfľŽôW·A1&ĽwżĐ±ť”ĚyôQ¬zçřGEyU±yófW:>“oyaŃ ;|ř0ŢŘţ2~ř"“Ĺ "Ą:-‘ťť ˝Ţł—×ÖÖbĎž=¨ŠŚ€YäÇ_ĚüĚďŤ /ŕÁiÓ{t :GH ~2â-BÁ'źtşNĚÜ9…¨Fe}Ú¦ź>ÄJ%Tńń·đz$/_ލ™3qgf&>X“]uu·ŰHip0˘gÍÂě_®…bL8ć­˙5>űíĆa_Ź«§őŚšş1ÓńD""""ň ¶Ł%˘gß‚Z“r9ë2\úo˙Ě?p„Ä0`Ńé`njBҲĄ]®—´l9ĚZ-,Łüś-6L (˙ţ{|űâfĽłjjóó! 
ĹŤÚÔăíŘ,說·{7ľřĎ˙r4ö^rĚľ}ű\aD:™aŃqŽ–PډĂüFě?_łŐΠŢü™4ŕ‰aŔW(DńÁo0ţĆ4(ÂĂ;ü†_¬Tbě‚ëPüÍ7»`A§ŰŠž=Sď^Ť°É“!V*aŃépĺüyś{˙}\úú Űş~r9:ř5ꋊđÎĘUś>3zˇÉÉĨżT„S˙|…ź}ćţ">>Ľb&¤§#06VłU§Ná»W_…¦°÷}˛ň°0ü}ţ|XMć>í[o說±çń_áž÷ßĂk®AĚĽy(9|¸WŰĐ\¸ŕ;ě6Ż8fV­Z…Í›7Ăd2ˇľ4…‡>@Âuwňd""""bĄőĽ÷ýĚO@¬J‚ #Ý}ć—bÚšű›š eD|…Bj5¨8y'3w ţŇ%·őçLÁÔ{îĆ)S!SĂj6C[U…KŕĚ®,ÚĎČ>y2®ą÷^D¤¤@sSŞĎśĹ©·ßFʼnŁŻća9ô~~(>t>ľľH\Úń(‰ř›o†@$BńÁo ‰:\'ĺg?Ăň­Ż öşë ‚ŻPI` ÔsçbÉ‹/âÚużt[ßfv„±ŃłgcůÖW={6ÄţţJÄť0iĎ?‡¸Ĺîßľ/úĂÓ¸î?6"$9 B©’ŔŚ˝~îxí›< ~2Ç8µ¶aDo÷­·Ś ÎĽł ·¨÷ŁÂ'OTť>íÇLLL 6lŘ€   @mQ.ňľzÍ&O("""˘!Öl‡Űh‰]»vˇ¶¶–…b]}ćW„‡cĺŰ˙DĘ˝÷"hÜ8%ř …PŚ Gâ­·â®7ßDčĉ®őă-Â˙ř;âoĽŠ1áđőóHˇ€*>3ţs¬ü×ۇ…ą˝FҲe¸ăµ`|ÚbČBTđ ! ĆŘëŕ¶W·aňĘ• $Čó|:›Ĺ‚¤Ą·v¸Nňňe°77ŁřСW%$`Σ뀖伱˙şë.ü-u>ţyŰí8öĘV´ŘíľfŤë$»Ő ɸţÉ'đÇâÍĄËđęµs°kŐjÔś=˛r•ë9Ńłg#iŮ2´Ří8¶uŢX˛ŰçĚĹűkÖŕĘç±đÉ'ŰÝśł/űÖ—:FYD¤¤ô¸îŠđpL¸ýv¤=˙ĚMM8ú˙ţź×7111xę©§ŐzSžúŇ<śű<“ˇŃ0QZoÁËo~ŚŻľú Ď=÷öíŰǢ AŻŐ“Ďü3ţäaa¨9{d<€ż_·żn>|đ!\9B‰sÖ­s­í/×ÂG @ÎŽ7đÖňtlź3Ż-\OÖ˙ÚĘJČCCqí/׺Ö÷ŹŽÂőOüp23˙Ľýüm^*ŢJż Ç^Ů »Í†Ôßü±±Łę÷ĂK6†‰f˝ĄGŹb삟2ĹmËŔ±c6i.űm§SÔLúÉťđpţŁŹpôĺ—]Ë›ĘËq23˛LYµÉ·ĄŁúěY·çJpůŰC8ô?˙ëZVwń"ľůź˙Á]oî„*1Áµ<ńVG`r6+ '_ݵĽćě9ě~ôQüdÇř ¶o˝ŃTVŞ‚;|Ľó©€¬(řôSś|=ŤĄĄ^uÜČĺrlذ;věŔéÓ§al¨Ćé=Ż"é†ŐP¨"yb 1‹­`2™đî»ď"77kÖ¬AHH‹Ó‹Ďěý}~wźůýŁŁŃ¬×c˙3@Cńe×ňŞS§pŕŮç°ň_o#|ĘŹ_ :gę8‘™‰ćÖŮU,:+.:c}–ýőݵůO^±‘Ç^ŮŠ“™?ÎÔĄ­¨ŔÉĚLřřú`öÚµxÇ8ü—żŚšß7GH #÷íŕ ŃVrë|Ćľü˛ÓçŽiđĂGuř¸söŽ1S§vřřéwŢi·¬®¨ V(\ËÂ'Oäďý¤Ă“üDćëľo=uŚŽ‘~RiďNˇă.Ä´űďo7¬Ę[B‰uëÖaÎś9Ž?„úśű< E<©†XäÄy˛ěd€ÂÂBŽ–ŠĆ·›Ďü?˛˙¸~ˇ[qu_ä'“ą–Ő>ů„[đ8ľ¬}}Ńběyô1ײčY3[ű¨˝÷Dź:îŰ9cş××ZŚ“••ČÎÎîv]ŽFŠż9{s3Ćßx#ľ}q3l‹ăľ·Ţ›Ĺ‚âťßřŃ?"Âqb\*îđqç Ł3¦ĂÇ.—´[ćĽÇ||~l~[Oކ˗;ÜNĺÉśß·ž+ýćĆĆżz řř@ ˙Äßt#¦¬Z…±×ÍÇűk2 ­¬ôşă'##‰‰‰Řąs'ěÍfś˙râćÝŽđ„é<ą†B‰©ËעôÔTçs-±víZČ9WhçźŮ;ĐŐ(Šţ|ć—`ňŠ•ž= Šđp×˝ď|‚vŻóŐ˙ô­[ÓMź–†šsçP~ü8Ęľ;ŽŠś´ŘÜoś©ŚtŚ^ľď“˝]ľ7˙čhď$TÁ(­Ş„öđa¤¦¦vą.GH #ťĄÇŽA¬TbÜőĆ-ŕ IDAT×˘Ż˝ňĐP”=ÚĺtźÎű6XŤĆw.żúţW?Ţçó›;YßÔÔ4ŕűÖS!IImUUĎžĐŇŁFęłg‘ýŇ|»ů%HU*ĚyěQŻ=†RSSqß}÷AŇZˢá4÷O.˘¤R©3n<”a±,őźD†¸k—bÂŤk đpŚ–Řżß?˙†„`éŇĄݍ€ŘŇĚ"÷ă3ż22+˙ő/Ězř6 ĘČHĄRřúůÁÇ·}Ű\›—Ź­X‰ś7vB[Y‰đ)S0ýţę6Ü·w/&Ü~›űďż‡Ł¸EmFaŚ!1Ě\Ü·±óç#iŮ2\řňK$/s\ľqńË®‡t5M)äJĄ®kÜN™´Ë ˇ§l–f%bĹ"·Y4śÄJĹí›s6ľN—“żg®űŹŤP·^úŕ͡„Z­vM ZvęLÚzN J4€çXÜä™řü\‹ADD˝‡¸‹PxôSH$¨Őę~ééé(ö9¶źźůç>ţ8䡡ĐVVâŘ+Ż 2÷LŤŤ°77ĂnłaíńďÚm˨ŃŕčË/ăčË/#@­†zî\ŚO[ŚČéÓ±đ÷ż‡ŔO„łďľëęuD ^[xC§÷Ť8BbąôőAŘ­VDĎž±R‰ŘëćĂf±¸f茶˛×áăAă˵ýÚ?Ć1]‘TÇC‰"¦M’} ?ŢŢ8Żżę-gňŮŮ´ŞŢÄ9-¨sŽÚ˘\śýěuÎŔADDD4Ä&Ś‘â±źĄă†nŔSO=…iÓ¦±(ÔŮg~ç=ö<ö ?űşŞ*XŤFŘ­V(ÂĂ»Ýnci)Îfeáßżx7ý pÍOďq{Çr„%‰a̢ӡôč1řúů!ĺľźÁO&CÉá#h6tÝHV´Ţ»aâ·wü‡ď¶tÇz9ąýÚ?MA`üŤií&ˇ3ČđřľĆĆâ–Í›áë燋űöASXاí$,ąŔŹ÷´iˇ„¶şÓ‚ ™Č7O ĵqţ }±zőjδ1:űĚď (ô5WÚ=gÖ/~´´¸zH{ţ9Ü˙٧k˝é[…_|‡†ş–•sڰHą÷gî—zî\ÜóÁű˝v- Z[ď¸;é®»Zţ˛Űçś{ď=Ř­6$§§ăÚuż„tb1ÔjĚ^»n» v« çŢżűÖzŤŰôűďǤ»~q@|…„Mž„导Ňáe±o~r9Â'OĆĽ_˙+ŢzţŃQh,-uĄ‘=%RȡJLĵʮĂü Ŕ5¬j$Ëĺxúé§]3pŞqň-Đi*x˘ ˘¶_M#Ĺm)!ł0C 'źůë[oÚ?çŃu@ !lŇ$,yá!’+ĐT^Ŕq™¸ŻźČBBpă¦Mťź ‘BĘĚ}Ě1»†ćÂ…{˘÷߇ŐdÂř´ĹH{ţ9¨Őđ ! 
QaŇ]?ÁÍţoÄÄ@¤PŚŞß ď!1 t\¶!V*a3›qéŕ7Ý>§ţŇ%ţËĚ˙ío1=#Ó3®©ĐŇ‚Ă[^B}Q˙¦‚,üü LĽóNDNźŽ˙ő_Xđ_˙ĺzĚÔĐ€zwż÷Ţ€ď[wó—?Ž/~÷ĚZmź·gŢyy˙ţxÄSJĄ8pŕěÍfśű<ń©w@;‘'Ńk¨(ÂĹĂ!iî-řÉMsDxP_?óźzëźH{ţ9L^ą“W®t-×UUăŔśÇ…t4nÜ´ đÖňtDĎž˙¨(Üú—ż´{ ›ŮŚ#˙÷W×ĎÚĘJ|őĚ‘öüsHX˛ K–´{ΕĽ<|·m+ Zf­eÇľCLę<”>Üă0ÎĽł u.âš{ŠđÉ“!R(anjDŐéÓ8őĎ·Q™“Ó˙ťkiÁŢ_ý3|ăÓŇ AS‡Š'đÝöíĐ×T;VłŰu߬&3 šZTť:…‚O?Cé‘#}z;6łşęjTť>Ť>úUą§Fěqµzőj¨Őj×´ _żĂiA‰PłÉ€˛Ó_ŁęüQ@á·Â˙ÖYH Ąž|ć/üě3Hü1eŐ*(##a¨Ő ě»c8ľýoĐ×Ôŕřöż!hÜ8¨Ćʇ¶Ş ÚĘJdÝóSL]˝cŻ_EX"ôµµ¨8q9;ßl÷…ëĹ}űPéR~v/˘f΄LĄ‚ÝjE}q1.|ţNďÚ{óčš-ŧĄĄőbQ&//[¶lÄĺçĂż‹)5©çccq÷űďÁÜŘ×§± ˝Tż?âăă±qăĆAyŤśśěر&“ 0fÂŚ›}ë ýŁ|ńđG°ZLüĺŽBÚębŔúő둜ś<˘Ţ[eŁ™łl‘程ŢńďD"ÁňĺË‘–6řź‰·ÍśĹ_yeOĂÔ+ˇÉÉ7ŇľĄÇyJMákš6mT*•kZĐŞóGa5›eZP}]ęKóXtQ>ţřcěÝ»0÷ţgY"˘QîęQŹŚŚŚAżiĄëKÖÓů%+ *M1uŢĽn×e A˝ríş_B=w.ŞrOá»W_Ĺ•Ľó°YšˇŚŚDҲĄ¸ćŢ{÷ěa±†±<őÔSŘşu+ĘËËQ[” }]%&Ýś?‰lP^3** R©”Ĺe¦M›6âFG9 娢áJUW‡‘‘ťšÚíş $¨Wľ}q3Ň_݆1)× ýŐm®sáË/‘Ď@bŘ Á† °uëV\¸pƆjÇÍ.çß…*rŔ_oĺĘ•lL‰hÄ0iëqţË®ź=5*‚h$á´źÔ+ —/㽟ý ąo:nŇb5™a·Ú`¬ŻGéŃŁŘ÷ű§đĺďž`ˇĽ„\.ÇĆŤݦ=÷y&§%"""ęF°*‰'C"‘`ĹŠظq#â^â ę5C­GţďŻnÓŘwËČČ€JĄÂŢ˝{]Ó‚Žťu gŕ """ęŔ„1R¤Ä(`ťđŚF#˘>b AD€ôôt¨T*×´ E‡?†D](Í=P§ÜĐé:Ő…'aŃ7v»­Đń)(:|̤­Ç•‹ąÝnC$čňśuîowşz? EĐÖ\îvʰXFĆők_ş{?ž¬í@ĽźŞ­'Ž•žÖv¸+<{_[O+ž8=Ň<ů×% V%…rČĺrţHÄ@‚ú+55*• ۶mÉdBŃáŹĐTU<(3pŤe§ŤĹŚënětťüKą¨«,îv[1c㪠íđ±+ M®×ęJpÄXL™>»ÓÇŹśęY#ÔŐűŃ4”ôh_g-BXRbżöĄ»÷ăÉÚÄű¨ÚzâXéim‡Ë±Âó°÷µőô±2ŘçaťŢ «}pţÖ7T!02ŃA"\—±W˝1 ˘A‘śśěşŮe}}=j‹rak6aüĽŰm"o˘V«Ű-»uŠŞÓőĎ}&D]¶{mś?’“;ŢNžßîÁ6‚ĺÂ.÷ĺß=|Ź]mĂzIŠ‚l#!LÚď}éîýx˛¶ń~޶ž8VzZŰár¬đ<ě}m=}¬ ćyźźŹ+eM0ů*;…Ń:M.|ű!,ú<ü«˙Ä5 áüG¨ R)*´Z””” &&¦Ëu}ZZZZFc‘\söśł—†Ť‚„xüýŹŤ7éľčőzlŢĽĺĺĺi`xݧm¨(rÝ}zýúőśeÜĂ? šş1Ó Č6Ks¸ŤÚ3g222ŘÓ pOĂĂÄÚ[ÖbłÁ¬ŐB[Y‰ŞS§Qřůç¨>sfŘż‡m3gń:Čĺrlذ;věŔéÓ§al¨Ćé=Ż"é†Ő2-(ŃPsŽŠ06T»–-]şéééĂre2âăăQqň$„6űď{ę-^5Śů"tÂLY˝ wfľŽôW·A1fŕ†‹MYµ˛Ă?DÎPbÝşu®iA-úN JDDD#RiîśŮóŞ+ŚŠŠÂ“O>9lɉÁĆŤ‘Xx2Ł‘= yŽfÚ&q>ÄJ%Tńń·đz$/_ލ™3qgf&>X“]uuż_/tÂťş•‘‘µZŤwß}öf3Îěyqónç DDDäőĽmT{ö4# GH c-6L (˙ţ{|űâfĽłjjóó! ĹŤÚ4 ŻšĚ“—z&-- ÷Ýw$Ç4WE‡?BĹąĂ, yµş’<ŻÁž†=ÍHÂ^DWUŤ=Ź˙ ÷Ľ˙Ć\s bćÍCÉa÷†PikîGlj*”đ a¨Ő âä śĚÜúK—ÓÖÜŹ9Ź>ęzžsÓ—O<‰ _|Ńăít&zölLĎX¤$Ĺb4––!˙“OpúíÂnµőiźťÂ§LÁÔ{îĆ)S!SĂj6C[U…KŕĚ®,ÚíOřäɸćŢ{‘’I`ĚMM¨>s§Ţ~'NđŕęˇÔÔT¨ŐjlŢĽ&“ —ż˙ úş*N JDDDŢŮ ů÷Żľoh ‘’’ ‚=MźzšľĽ{^ɨŃŕĚ;»0㡷h‘Űɫǝ™ŻCćöĹp$Ţz+â-ĆGżř®üđC—ŻŃźíŘ­6Ś]°K^ř_ř®ĺÁńă1÷ńÇ>y>˙Ź˙ěókĹ-Z„›ţűOnŰůůAU|<&Ü~;Ţż ô55®Ç“–-Ă OýŢí9Ňŕ`Ś˝~Ć.¸‡^xgł˛xpőPLLŚëf—ĺĺĺ¨-Ę…Y߀¤…«9-(yŤč ®K€Xč‹§ź~šaOÓ§ž¦/ŻĂžćGĽdĂ ]:x‘’â¶|ćĎ‚<, 5gĎáŚđ÷ëŕď×-Ŕ‡>„+çĎC(cÎşu€śo¸]۵mć,l›9 ľř˘WŰi§ĹŽëţc#Î˙űßř×]waűś9ŘqăMČ~i Zl6Ä-Z„Řů©}Úg¸ö—ká# gÇxky:¶Ď™‹×.Ä'ëme%䡡¸ö—k]ëűGGáú'~8™™‰Ţ~ţ6/oĄß†cŻl…ÝfCęo~ŔŘXX}%˘˘˘Úębśű<Í&‹CDDDÚи!)i‚ ˛bOÓżž¦/ŻĂž†„Wk*+HUÁnËýŁŁŃ¬×c˙3@ő™3°Ť°Ť¨:u ž}>er·ŰďĎv|ýüPuú4ţéżŃP|v« Ćúzś~űmäľů aÉ-}~-˙ÖřDf&´••°[­°čô¸|čľřÝď`nj‚,$ĵţä+!‰đÝ«Űqě•­h*+Íb¶˘'33ńýß˙_ˇď¸V/9§uÎŔal¨Ćɶp"""vJs@§©@t+f†"V%aQŘÓ HOÓ—×aOó#^˛á…šŤŽoˇý¤R·ĺ?˛¶ÓçÔ9ž#ë~H}·söÝ÷:\~aß—¶ć~„MśĐçת/.†*! ź|Ů/mˇ¶ÖőXÍŮsx}Ńb·mDĎš Čß»·Ă×(řô3Ě^»‘38[D_C‰ŚŚ ŔŃŁGao6ăÜç™HZx7#ăX """tK—.EaŤ‚ŔvʵťAĂRťŹG—˙aD˝÷’’deeˇ"!QeeĂvęĎ‘ÜÓôĺuFzO#3ˇŚ€Z­f 1‰•ţscc»Ç$Ľb%˘gĎ‚"<Ň  ř …n×őD¶Ł),ěpycI©ăm“öőöµľú㑾u+âoş ăÓŇPsîĘŹGŮwÇQ‘“›ű 3•‘‘€ű>ŮŰĺ>űGGóŔę‡ŚŚ $&&bçÎť°7›qţË›w;Äň@‡Uzz:>9ŁAŤÖę¶Ľ4÷ĘNpýě'đAmm-B®ú,ęÍ  %¬ďü>’zšŢľÎHďi˘ËĘ1cÉ-˝z5‰‘($)  ­Şjw ŢńÚ?  
í×öű»ťfCÇ÷°šLŽN,îókŐćĺă_+Vâš{îÁř´Ĺź2áS¦`úŔP«ÁwŻnĂůŹţýă?ű@‚7mBěüT řPFD`îcŹ4.¸¶îý÷a5™0>m1Ňžj5|…BČBTt×Opóź˙11)<Řú\ާź~+V¬Ŕ#Ź<Â@‚͵ăü±~ÝϱtéR<ýôÓĽD=ŤÇzšľ˝{WÔŇŇŮŚ2yyyزe‹ăŕČχżN?¤űłöűă=ZŻüřq|ń»'`jhh÷XÂ’%H{ţąvËuUŐřŕ0ç±G‘xË-®ĺŰfÎÂmŰ··›fßďźęőv|…<|ô(,:ľ|âIÜňŇKđ¶ż«lŢîÝ8đÇgűĽĎo-OÇOŢŘipp‡ő±™ÍŘóŘă¨8yҵl|ZŇž®ÓôóJ^>~äX†ř€‚„xüýŹŤ7ň_/""""bO3Ś{šľěŻ2"bD÷40㡇0ű‘‡»]OđĚ3Ď<3OŢÚÚZ=z¤Ń@liŇý™ő‹_t¸Üj2CW]ŤËß~‹#}Ç_ÝîJŻVwáĚŤŤP«!’ËˇŻą‚˘ŻľÂţ§˙}M 4……HI4 M8łkŞĎžAHB¤AA°šĚĐâŰ_ěőv„)¦gdŔÔŘoţügTž< EX8ÄţJř¨żt '33ńݶW]Ă–ú˛Ď'ţń >ý -V$ŠDđńń®¦Ĺb˙3Ä•~p«K}Q.ř~R $ţţŠĹ°Y,Đ\¸€Ó˙|_oúS§5ő4Ť*Íb1‚‘ššĘe‰=Í0îiú˛żťnD÷4eŃQ¸hoAu}=&OžÜĺş!á‘&!ADDDDěihôô4Ľ‡‘R«ŐXż~=âňó!3Yň: $Ľ\.Grr2üuzív„Ľ """""""ň8DDDDDDDäq $ČăH‘Ç1 """""""ʞDDDDDDŢGŻ×٬¬ Z…R3m×á """"""/TZZŠ—^z “aIY˘Ëʱd||ŽdOCĂCQ\ŽÖ×!''§ŰuGm ‰D¨S©xÔĐ3HĄ0Éĺ€iÓ¦± DDDDÔĄäädWOc”IYr h Äąââ­?Ş/ŮXĽx1 )( “‡‡cÖĂżŔ„ôtäää 11±Ď#˝HtBŻ×c˙ţýŘżżŰIěkµ"XŁAhÍ-Š:Ą FťJ˝żŇmy||<–/_Žääd‰<ŢÓř™ÍPię¤Ń°§ˇ.5ř»¬iÓ´|& ŕMřHôđ$>|ř0ęëëÝ“čőÖÔ! ±‘'2ąNŘĆŔ@4şŤ†DŃĐö4W0öÂE66˛HÔiOscp0®ż˙~„BĂ@˘˛łłqřđa·{L89Ă …N™ŃČbŤV__蔊NC‰D‚ąsç"--Ť—fѰčiöďߏňňrE"¬‰ĹĄŻż†ľşšĹĹ=Mc` «Żąş§ąá†°zőęAym}P[[‹}űöˇ  ĺĺĺí÷µZĐĐ…V…NÇŃ#ŚVˇ€^©tü÷ŞË1ś¦NťŠ””¤¦¦˛`DDDD4,{šüü|×çŐ+yyČŰ˝'N ®ő ؆€Řö4#T“BŽŠčh:¸˙D"AJJ RRRútłJ<‘sssqřđáĂ ŔqŤ–B«…Ô`ä /ăa’É» Úž°ýą© Ń7Şĺĺ(?qŰ?ůĆÖvŃŮÓ(´:HŤFö4#€_J Ž |Űő4‰‰‰űb•Är¦ŚČÍÍmw}V[ň&-”:üĚfžĐĂ(|0ĘdĐ+•0‹ü`”J;L ť˘˘˘””Ţ‚F˝^Ź'žx˘ÓžĆ×j…Ô`„R§ĂĘJlö4©ÁŤë!Ó¦!rĆ „$%"rĆ HüýńěłĎşľXŤ‰‰ńřţ2D%%%(((p…]ÎBd1Cli†\«…ŔfcP1'©E$BłX ­B‹Řń˙»â Ôj5RRR8 ‚F}O#iiÁ”ü|4ë ť~öÚí,ä Đ*Ýö4S"#±jŐŞAą!%äňÎcIIDAT /ăAQZZŠŇŇŇoŽŮ‰^ÍĄNç.´ZpýLí™E"XD"Řľ0Éä0‹ü`‰a”IŰݤĄ#AAAP«ŐP«ŐHLLä""""˘Ö€ÂŮĎ@ĄRaÝşu055A“_€ň'P[‹V‡Ęś$ÄĂŕďďÖÓřZ­Ťü¶P?ľŰž&%%eĐnHÉ@b„śĐŤĄĄĄČĎĎGYYY·#)®&orRŁB›Ý-´i'ąstW©6 G#®µZ •JĺÁŃDDDDDý÷«Ç‡Élîr_«SOť55iŰÓ8ż@í¨§‰ËχżN?ą !‰IPFF@‰¤DÔ™ĚřűGş¶ŹŻëiH SyyyĐh4®°Â`0ôxDEO §ŽFY8ĂŚÎô4äh{˘u¦í čäÍŕÔ—ájAAAP©TP«ŐÉdHLLDHH§â$""""DűöíÁ`@~~>ŚFc§<8m:¸FW@eNŽăżcPé /¤c‡˝‰CC§}JŰޤ?_ÚjŠ—ë•î7żďęľmßOWŇ,ŔŠźţ´ËžŃŰ{^V@AA ?? 
ŃhP__?*k©TęJť˙cč@DDDD4Ľčőz”––Âh4˘´´žžŢéú˙řŰßpüĉn·;ËfGŹŹ[áÔ¤Ł()©ŰmČššXŘůÁÎKPşłrĚ(#~ B’!V8B‹CgNăŔ±cÝö4ŁáŇq#TII [pŔ5Ú©/—x*\prŽjŕ śËyiŃČ–——çÖÓ8żurŽ$_ż~}§ |^^¶lŮŇíkĹ„‡cÍrG8"R*ÚÝ ň…^čŃČőM›6uúĹhŰ^m´Ę@‚Ú©­­Emmm·ë9/)éHbbbŹ^‹7‹$""""˘Áć•Ń]/ŁR©ššÚévÚ† N•Ýw $Čă|Y"""""""ň4DDDDDDDäq $ČăH‘Ç1 """""""Źű˙¸*ĚľťZIEND®B`‚ceilometer-6.0.0/doc/source/1-agents.png0000664000567000056710000014157112701406223021206 0ustar jenkinsjenkins00000000000000‰PNG  IHDR—űa»3bKGD˙˙˙ ˝§“ pHYs  šśtIMEß  >–„ IDATxÚěÝ}xUĺťď˙ŹsüéŹ<@¦ PBCÇlńŞI€˘cÂUsô˝¬„ ‰ĹË  aÎH‹­@­txf-f(0zZHR™‘ŕ@ˇ<$ôîxÄ„H,ḷלĂďŹd­¬˝÷Z;{…<’÷ë%Y{gí{Żďşďű»î‡;nŢĽyS¸đgŔ-’Ë×H.\#ą píNŠ ˙¨ŻŻWyy9 G¤¦¦:ţŽär?qîÜ9Ť7Ž‚Đc˘˘˘ô§?ýÉöw,‹ŃOěßżźBĐŁęëëuäČŰß1rą;TĂGĄ t‹KV©ą±%č1$—űˇéO¦jnÎS€nńă¬5:ęBĐcXŕÉe€k$—®‘\¸FrŕÉe€kwRčÎźş K«5flĽÂ"ÂtďŘŃ Đ‹ą<@ĺ|oą›©ĺĎ®éđŘ59yz|l¦voz«KţöŐ+×´ÇŰ?|l¦›©«W®™?kjhVÎ÷–ëÇYkôĆk;ôă¬5Z›łÁńřžŇÔЬݛËcăKżęҲęJóÓëń±™Ş8yŔ-#ą<554ëŇĹ*IRĹÉ &?ů°Z’4ćţř[ţŰűwĽ­ůi‹ŐÜŘâóóŹŰÎ'<2LĂG5~âđďuéb•Â#ĂôDÖcz"ë1M2ŐńřžđńĹ*-ţŢrť(=đ»KŢÖ˛zđˇúÜwn$á»â{XcşÔ–,Ź Ssc‹J[¦D‡dčŐ+×Ú“’]°Ĺť%&_‡ŹŞu;V*<2Ěçç'·&pÓžLŐ /=kţĽ©ˇŮöřžđţáßëę•kJ›•đ»ËłZËŞŹ%pŤď|XěPE 'pËH.@çŰF*Oš6Q'/¨t™ććr¨ÔáýG5wŃS Kc”î˝÷ŹöůySCł^YĽÁv˝ćÁází×+Ě„äÇ«ôă¬öŤŻ^ą¦g­Ń#ÓľŁ•›–I’ůűíĄůćď­Śç˙fť"‡űo=ç…µ{ó[jjhöyýÂ>çdŘúZˇöľm[N‰= —ó—šď˙úO·#ą%éŤ×vH’ţů4˙ýţáßkÁň,Ízöq󸥧´u]ˇĎĆĆzÍţĂđŇÖŞ©ˇY˙üÁ?jíâ >ë;—ŞL‡ö—iÝŻW„ś`6ľó«µ×ôÜôżń)ź…•˝ďîÍo™ĺóöĹ˝úçţQs=%I*đK˛ ˙"ą<ŔŘ­ˇ<˝-‰[şß7YůńĹ*3kÝ ÎHŇ>2í;úEáJsTëđ‘CµrÓ2sé#ůi¬Źüymť¤Öő“ŰFM[ĎiĚŘxźăÍő™Ű–Ź0–đ?Ţ8×…¦u;VjÖłŹkřȡşwěh­Ü´LĂb‡šKRH­ íŇß–)<2L›~»NÓźLŐ˝cGkřȡšţdŞ–ü|ˇyśÁş„Ĺůž“ÝzŇM Ífâř‡?_čł®ő¤´‰ZŃ6jűDé)}lm|ľ«W®iŚ'^›~ű MJ›Ř¶ŚÉDĄµŤümnlqőťK­ ň^zV÷Ž­{ÇŽÖ’×ęÁ‰­ç러6–±ŰśĐiý슶×<‘ő¸ůóÁáš›ó”śř€Ďu€ţŤäňc$W­ŁQ'ĄM4“݇,‰?ăŘa±CÍŃą'/¨âä…G†™ XĆć{'˙Ţü™1W ÜTÎř;ţKoŘŤšv:ŢX†aŇ´‰¶K8ĚÍyĘ=+µŹ¦¶Ž<¶jrHÜ:Ťćm/×öDěÂjjhÖ°] břȡfb÷Ľ%‰mĽWxdĽ”đ:#©ćę;;Ôgů ń}XéÖd·]yçk—x–¤ĂűË–&™›ó”Vä/5“ăčßXsy€1ţ ŢYYŹiëşBŢ_f&B/›ŔYFź8Üşöď¤i×ű}đˇ¤Íľ?3FĎÉTźßŮ$‘­Éh˙ä¦Ýńď·%˛Ťe+üů'wŤĺ7¤öş­çY­¦ĆöѵÖĎn=ÖżüÚ“­íçjŚźĺpNĆyś?ĺ; ŮřŽfe=n[ĆvËQ„ňťĎÍyĘö÷vIjs#ljöÚ=¤¤´Y©:PxP'/čąéŁ'˛37 tÚĽýÉĺĆi4pÚ“©Ú˝ů-UśĽ «W®iřȡć(ÝDK‚ńü©‹’¤IÓľăř7ěÖÔ5“‘~ÉÚ¦†ćöe.,Ł`Ť„¨rÓîxăŘđČ0ŰQ¶vššµu]ˇí Öd«5yú±Í’"ţçkś“uťăIi;,«a–÷3ŘŹŘ”±Órˇ|çNß™ÝňƧża~?~żżwěh­Ű±R_ú•>Ż˝Öşąß¦·”ödŞ,Ď yBô}$—§5”ĄÖuq'M›¨ŇýeÚ_ř¶^xéŮö5w- [sÍć IÂó6ŁŁ+lFöJľËIXŹv٦ťŽ7’­NĎů36$ljhÖ°ŘÖ5–ÇÜŻđÁáććÇÇf”“Óh])0io&Ťc‡=ăuF˛şŁĺ(ě–ßĺ;·.mpŚM"Ůxŕôw‚­ÇśřĐúőá|ť(=Ąý…uţTëúÖ—>¬Öş_Ż Á p› ą<€Ř­ˇl57ç)•î/Óű‡Ż´YíËH¸]Îŕýw[—¨°&Ť$Ş2ňĽSŇŮamc»ă?·)mUqň‚>Ż˝¦{<ńşwěhíŢü–ąň/ W˘´ué˙‘Đç– ¶„Çw?ݦ†f…G†µoVŘÁrNĺŐŃw1Ä>ˇŰÔĐl.)b)Ýľibŕß±®Ëm|Ţ«W®éóÚk ‹h/łIi5)m˘*N^ĐÚś<]jŰtŃiyô/lč7€8Ť6Ě]˝rÍL8ú'Ť‘¸Í~›µöďxŰ)k,a1íź|5FÍ$ťÖ6¶;~ŘČŕŁ_˙ÉŻ´ńĄ_™‰V#qí´LÄťŰţ†ďą^rÚ`Đf=ici kâŮß˙´lBč˙u´E¨#—Ť÷»díóY šß«ńÝXGOű/˙ŃÔĐlž·őóVśĽ g­ŃÖ× ţFâChVÖă>ç€ţŹäňb$Uď 2ęŐŘ|nw[Ń?Á:)­5!»ű—˙+ŕµ'/hëşÖäâ /e™?7FۍƵۜ.ŘÚĆvljŘó§.¬÷üĆk;tőĘ5sů ©=azµ6pÄł±î´ő}ŰËŻýĽ|~nłžô˝cG›‰x#kµń'żRĹÉ Ź ÓKYµŹŘ¶O;­™ÝŃw¦ý;ŢöůÝÇ«ĚďyÉk ۭ͟Á,µŻSm.CbůĽĆůž?uÁ'9m°{ ú7–Ĺ@ŚŃŔÁ““Ň&jXěP}^k¬cě› ś»č)ť(ý˝.]¬RÎ÷–kú¬T 9T'ź27Ç›»č)źMěŚÄâ'Ţjm}­Pá‘aš›ó”ϦwÖD®ÓÚĆNÇ9TOd=¦…µř{Ë5wŃS ¦Ňß–™ Ü›–šÇ§=™ŞŇýe:PxPwčMJűŽ.]¬Ö‰Ă§Tqň‚ĆÜožť˙ůË˙Ą1÷Çë‘ď~G÷Ží¸žô’×jůłk´{ó[şä­VÚ¬T576űś×ş+}–(±[çÚúůťF€űÎŤÄúÖu…şä­Öđ‘CuőĘ5óűúáĎú”çűă¦ćĆ­]ĽAcîŹWsC‹N>ĄđČ0ĄÍj-?ëFŹ÷Ž­'> ó§.襬UÚ¬TMJűŽ®^ą¦Š“TşżLá‘aćfô$—ër­ˇ<+ë1s˛":bp¸~Q¸R_ú•Îźş 7,ŁTÇÜX–Z“ąű ęóÚkÚ_ř¶śř€ćʲ®ŻĂ:ĚN#‡íFőľđŇł’ZG żńÚóçN|@ ^ĘňIĆ&>ô€,ĎŇîÍoiáŰÚ_ř¶ylţoÖéýÿץ«FAIŐĄ§t˘ô”ž×:ĘŰi ŹÄ‡Đş­eeĽĆú^Od=ćs^Ćwd7bŰúůťÖcö÷±ąnrĽćć<Ą¦ĆfźQÔĂb‡ę…—˛ľŻÁáZđŇłÚúÚźó~"ë1˝đŇłĘůŢň¶÷íóş›–ę@áAłLŤrµ~Ă;XÂýÇ7oŢĽI1ô}?űŮĎ´zőjI­#ű¦hÖ‘Äţ›ßŮ1’§ĂGí¶$Łőś:ú;M ÍćĺPĎéă‹Ujnl éóÚ˝®»?ż›ňéč!őŘ1÷ÇŰnŮŃwÝŰźťóă¬5ć Ęű·ÓÔ©SŽaä2:ÍmŇ01Äu‚{ęś"‡»>'7 ĺ®x]o–Ď­$…{â»@ďbC?€k$—®‘\¸FrŕÉe€kwRýĎŐÚk:ę [47µtx Éĺ~¨ô·e*ým ×°,F?E!čqNąIF.÷łfÍŇąsçTUUEačÉÉÉJNN¶ýÝ7oŢĽIÜ`Y €k,‹ť;wNź~ú©«×DGG+))Iaaa¶Ü>,Iš6mtâÄ IҤI“( €ű3â ʏ-\FPÇŹ׎;:őÚ… :®Çr»űőŻm&kjjôü€‹ił^^Ż—ëŕţ €xGÄ#n 
˙ĺg?űŮĎ(89wîś*++;őÚo~ó›ňx<ş"¤O?ýTׯ_°‰v\ń€xGÄ#nOŚ\FȢGQLÜź=¦®ćOş~ĺK*?ĆĎxâČőŔőŹńH<Ä#â· ’ËYLÜźkě¤1AŹą¨K6ąě_Śzŕ›’¤ËţH…ŔőŔőŹG€x@<â¶Cr覊`ü_=`ţ› ëë â‘xGÄ#n7¬ąŚ *++Í5—cľĄˇ-‹ńéźtýÓzIRBB€XsąŁŠ`Ä_ UKĂ }y­Ik&q=p=Ä#@<Źń€xÄíä2‚"ą|k ®€xGâ ʏ]‘\FP$—o˝" B aŔőŹńH<Ä#â·#’ËŠär×TT4 ¸â ‰G€x@<âvCrA‘\€ †×@<Ä#ńŹńH<âvBrA‘\îÚŠ€ †×@<Ä#ńŹńH<âvq'Et®"¸ó®;>$LŢ÷«4hđ˙«÷Ćč˙ą»ăúěăkj¸Ö¬đ!aşó®;őź_˙§$™ďýü€Âîç×ĂŕˇáqďĐއëŕţ ŹÄ#@<Ä#ńţŠä2‚ÇűT’ôź_˙§.ž¸dţűáŚDŤ¸7&čű\ţ˙>Ó™w.:ţţĉJHHĐäÉ“)ô>ěřńă]r=|öqť>(:Ďőôxäţ Źýâ ~Üű3Ščőź7txLsĂŤŹ Ł0ČőĘ1¸?Ä#ńŹńH<˘Żbä2‚iÓ¦iĐ Aş~ýşĎĎ˝^Ż>účŁN˝ç}÷ݰ&utt4ë$ő“'OÖŤ7ÔŇŇÂőôx”Äý Ш‰Gô’Ë€Ë Á_g+ŹÇŁôôt ¶7ŘąîĎG€x@Şm9ąüŤ»C ›TÖ6\öò×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\»“"Đ_—ťUyeŤŞkŻ«úł:IRĘŹ’┞:ŽzÁ+E’¤¨0ĺĚIsőÚŞÚ:íúÝqIŇŁă=JťŕąĄs±ľß3˙m˛FÇĆđá¶ŐŮř),>¦ËĽ®QߌVVú 2„ű[WÜź®+Üę\vNĺ•—UQY#IŠŁřŘč®é[m«•ťöę˝3^IŇËŮ·]˝ţŢ™J۲MOIV’g ú,’ËúE#|gÉqUŐÖŮ60$itlڶ®śO#˝v–W}cłĎ™NaŔµGΚ ŕ™©É®: ů{Jµio©$é䮕·|.Őź]×Ú­íťv’˸ťYŻ÷¨Č0y¬STdXH÷üŁgĽJďéňär}c‹r7îSÁŞů·Eĺűrví p]a@«olѦ˝ĄćőëËkţ_RBś^ÎÎPĆÔqÝŇV;zÚk> é+ÉĺúĆ˝RP¤ôÔqťŠiăőů{JËvíÖ"ĄNđhýgM2oÚSŞGÇ'FŹcY }ş3ăĹőZ»µČL,'&Ä)=5Y/gg(eĽGń#˘%µ>éťńâzŁŕ\ńâz=żúźTßxÂ@§,Îl­\\vÖŐkKŽž3ăšF0pkőeöší˝ze§˝ň<±\…%ÔĂp;)÷^6űd†Ä„8Í›99 OV^YŁďçnÖ˛Ť{DŮTŐÖÉóÄr‡Äpčý1ëëSĆ{”2ŢŁôÔdĄŚ÷hHÄ łžť±0OĺŢ˶çńĐÜŐZşqŻę›čסç1r@ź5ăĹő*o™žš¬Ů¶ ¨ü=‡´lă>IRöšíŠŹŤa$‹dp+2¦ŽÓ˛Ť{őeÓ mÚ{8äđEGΚŤ¬ jťStä¬ŠŽśu-ÖÝŽžöŞľ±…/n#őŤ-š±0ĎĽż§§&+oI¦íăÂâcf›0O©ľlĽŃĺ3YR&xô˛úÎrŐź]żĄş/Ď!źţî¶UĎĚBŞolQţžR˝RP¤úĆ=ýŁ_Ę{`]ŔyďôF.č“–nŘkVófNÖ[y9Ž#Ď™®‚•í —˝Çşn«›Ť^^ˇü=‡”:Ác>1·26˘đßptlŚćÍś¬śĚ4ŰJ}Ć‹ë%Ië8[ń±1ĘݸOEegͧřŁcc´ţ‡łÍŃ—ĺŢËze[±O‚;)!Në—d,×QX|L»~w\ާ K3ÍÍŚ÷ŽŠ Óâ9ižŰ3˙m˛ăĆLK7ěŐůŹZGżł%7ŕg’´ëwÇÍť—Ťc:*·¤„8-ž3]ófNć˘ŕ’<Ł?"ZŐź]WÉŃsÚ°43čńÖŘ7sŠccűŐ‚b=ăí’ë.”X1âŃ?Ę˝—•űú>óçe§˝zµ ČgY™Ś©ăôňóéć ‹ť%ÇőJAűzńQ‘aĘH§őKf;vĘN{µio©ĎňQ‘aJ™ŕQNfËý H‡sşľlĽˇŁgĽ]˛<ĆÎ’ă>SuŐ•F|TvÝ6ŢĘ+ktţŁĹŹů˘ô IDAT±ť]ßآ§s7K’YÚÝŚYIţ›•ťöjWÉqźzŮI§¸ńŹée÷šë]†şA°‘Č˙˛íov´ÉমZ˙Ă٦W Š}Ö1÷ß,-”ú(”şĆZß{­]›ĐZż9ëÓ® µ-N›˛oĘßsČü˙Ľ%™®Ú…9™iÚ´·µO¬Nrş†ťîßNmµPę0cßž`I\cSë†çÎuŠQŹXŹÉݸWQ‘aŽ×0ŐźŐuxĚčŘĺ-™­úĆJ±”ÍŚ×Űž‡]ík˙:3µ­­i÷=uTgZűÁFůď,9°üâčŘĄŚ÷č§Ů鎣ߍ{„SxíÖ"ť˙¨&h{Á®˙ěóˇk\Đç9çS‘‡*u‚DZ3X§ôKźJĆZ ­ÝZ¤ť%ÇőĎ÷× jŁb,ݬ±}ŹŞÚ:}?włň–ĚVĘxŹĎşdć߯¬ŃŚ×ëť-ą>çXýŮu•ťöęćM项«ÍFĐAú˛é†ę[´vk‘ŠŽśUÁĘůŽçöčxçNpEeMŔÓm˙źUŐÖŮ–Mą÷˛íç1>Óó«˙IEe­çĆčÓžÜJÓ˛ŤűĚ™Áb×:3Á©ăŕß`÷żîň÷Ň;[rCľîB‰#şM7Ěźł…St䬎žöĘ{`ťr7î ŘÔ¬ľ±E…%ÇT^yY'wŻ ,“=ĄZjłůŤŃ)3îN‰q `Ő|=4wµľlşˇě5Ű•2Áăúľl$zíâŔ©®´Ć‡]ĽŤŽŤn‹yŻíĂ•Ł–‘qĺ•5¶ťĹⲳ*;íŐA>ő uĎ»,:rVY3§$µ­çĽvk‘ĎFJUµuŞ®­“‚$—Ť ‡Ť:Ű®~:ø.Źžńę•‚bŰöä÷s7ëť-ąŞ®­ Z}°ke@'{ővÇM7­őÍ›ëÔăţ×˝ýVTvVŹŽO0۵ťiSş­ŰŃ˝ŠËÚ7^vJ:Éš9Ůlď•Ů· w•·˝µ{śÚjˇÔaů{Z“Ćďüj™í=»čČYÇ奌sĘ[2[‹çL·­űŚŘ°»ţť¤Ś÷č诊ËÎiÓžRĺĚIë ˝=Ýńľávík§‘äĆ(ő¬™SęjëgÍßs(°ÎlKŚ;Ý#üű›Eegmż§{„µ?|ł­kWĆÁľ?ëçëęuŔŃŠe1ô9FŇ31!®K—Uµuš±0ĎLžľśťˇ«ďţľ:µMWßýsttUmťžţŃ/׫Ě}˝5qörv†NîZ©«ďţ VÎ7wđ}Ą X3ćéćÍ›*X9_Ţëä=°Îgôµu€˙g.ݬQbBśĽÖéóË×W§¶)oÉlł‘đôŹ~Ůeeś·d¶Ď“ěy3'ëť-ą>?3Ę­ľ±EC")oÉlłÜ¬źË¨Č1°YG [×S¶‹GŁŃąŘ¦]î˝l6 ‡D RÁĘů>ם1ýżĽ˛ĆíŘ“˛×lWbBśy^'w­4GZŐ7¶čágÖ¨°äRĆ{ôΖ\]}÷ôΖ\s6EyeMŔ:y…ĹÇĚÄrbBśŢ\żH_ťÚ¦ŻNmÓÉ]+Í%˛×lgÉ8cNYîěňÖNyNfšNîZiÖ•F}ç_W&%Äéť-ą>#ŤúdŢĚÉćő+µ&’uŠë×m7ޱŽÔjÝ8j_‡qSXrLK7ěuüĚŻiHÄ ĺd¦éĺě %&Ä}8f—X桺ڲŤűtóćMĺ-™m¶'Ť6ˇÔ:21{ÍvĹŹÖ›ë™ÇXëŁW Š|ŢsíÖ"3‘—žš,ďufĚx¬3×s5ÚĽv÷ë~(ĆýÁ·úĆ3i7–¤ŃËŮćß·¶ĹË+khSöÖ{q’ÍŚĐŽX‡NÉζëŃÚÇxsý"ĹŹîT»'{Ívźe<Śkô«SŰĚ:Ě˙Z´~Ţď·­/mÄ•µN1ÚqË6îSŃ‘łfÝgŤKŁeýYG¬máĄ÷ĘóÄr-۸×Ő˛Nçaí×YűkvźĎh_–S®ĂC[Łżk­3ăGD+=u\[Ů´ď™dŤq˙żQßآµ~÷'cŁBă{1î}_ťÚf¶ŁË+kŻĄŽúĆý­Ł6:Źä2€>+*˘kF-;ëťŔ Ú§C$VŐÖiŮĆ˝Ž ăĽ%łµbA†’<٦¬ô)ćäúĆÝĽyS'wŻRVúŤŽŤ1;űFcß©Ń-µ>ą>´%×gd€ułÂ®ÜĽ!É3ĘgÄhü€‘ßÖr{3/G‹çL7ËÍř\FC¦čČYÇQ ńŇuní`·$†Ńˇ1H‡¶ä*+}ŠĎu÷V^ŽŮ -;ííń MüĎ+É3JŰV=gv„Şj딞š¬Co´ÎRŠ SęŹŢ\żČ|ëňőŤ-ćgNLÓˇ-ą>‰­$Ď(z#×L”Ůuřkťa\+EGÎşŠŹÂâö)¬/gghĂŇL39`Ôw‡Ú:ŞĆ0ăw©<ŠŃ^wőÉčŘsŮÉţÁSÉQßű…‡Úš°ĘhëŔZă&~D´cÜŮM{Kmgç1]Yô mXš© 
2tj÷*ÇŰ$–ѓڶ—Ńž´.Q^YŁřŃ:ą{•2¦Ž3ʱÖGţIciĘxOŔ¦lŁcc´ai¦ůţUµu>É7k;/oÉlm[őśy0â-Ř’Ë6î5ßďť-ąZ± ĂüűF[ÜÚ¦äAjď«oşáÓOč Ł>rÉ*Io®_äÓÇČ:N'wŻ2ŻăPŰ=Öë&'3-`3xkVßŘĐßł¶?Ť¸ň©S¶äšçTXr̬űÚ˙FbBk˙ĘÍL–Ś©ă|+őëôÖëî‰Ď롹«;L6;ť‡µ_×Ńç{+/Ç'ÁěÔŻ»yó¦mÉ5ëĚʢ_htlLŔ~*Ö·ţ #IďßWČßÓ^OׄńúÔ ­}ä” łŤŹĆçóď?lXšiö‚µ Đy$—ô­†ŚĄ!;$rP—Ľ§QŃĄ§&;vłŇ§ŞSblHÄ Ű©HÖ‘TFß_(Ť˛<‡µXłŇ§Ť™ť!l–ÖUßń·‚­őşxÎtóÜśFecŕ°&~ś’ZF")=59ŕz/÷^6ł‹çLwlśŻXa^wFgą'“wvqj]ç}…ÍZńŁccĚs¶ŽŢÜYrĚĽď­˛ 1˛… KŃ‘‚UíťTcĆM(¬ËŐ8mÚ”äe&ÜÄž1Şé˝3•>?·.ÉdŚ*˛.Ťĺ/Fg¸čČYËč¦LǸ±®j}°e5oć”fIYËƨ(Ëč.)ăí—złŽ ]<Ç~?Ł]jMčŐ7¶('3Mé©É¶ł†ěŢßúzc„iüh۶°oƽǩ->oćäÚ”NKw çT÷@ň-=5Ův¦HTdĎĚR»-ţ¬×h(uµżWU[çÓţ´‹«¨Č0Ą§Ž šŕ쬬ô):ą{•ćÍśCĺ•5f˛ŮóÄňNő‹Žś5ëÚ`uć¶UĎYęLű~]ĆÔq¶íóúĆ˝śťˇ”ńźŤľťú ţŚu´]N#ÂýűNK¸Xď}NmtÉe}е˛ű˛ńĆ-żźĎć©Á×o¶&ĆěžÖ&%tüşłOöăGD}ĘmŚđ쩝­ťůŽ652’jç?ú” x€Ë:.čŲÓí›kdŮŚZ¶^ßm{ś;$ă:ż=ŇáwëýÁ)–íîÖó6ßú;f ˙ĺ1„8ĹܸS:¸çÉ'§%,ěő˙úţF]cM¤ů×sĆ˝Äú@Ęç^$nŚÍ$™›Ö:ť[0ţ#–ßĚË!±Ś^©küG'†ZףßĘËqŚ™úĆݡ;lg<JŇ–ŽŠ łm+[묎sĆçö…Ţí“u[»1ČődŤPú?Öőˇť»q Zű{Ö~O°:pĂŇLz#WoĺĺtKÝ˝mŐsúüßňőΖ\3QkUU[§çW˙“˛W»[:ĆÁꮨČ0KťY´üśÚ‡ŢČulWŐÖé˛eöžőçFŰ Xťlť ĺÔöýY?_E÷6ô0`Äw°…ő÷_Ú¬»ÝáßčĚšdN'Uµu®7ŐpËšđzĄ (č5cŠ?Ó‹ µ>yĄ HĹeçTßŘâÓŔßiUb׹­·ÇemâGDkHdX—’°ŢĎhK÷’ËúśôÔqÚ´·ÔśrëfDâÝźWRBśćÍśěłv›[)}¬ĂhM’wö3u–u§a #Łcc”žš¬â˛sÚYrĽ}„˘eŤT§MÜ$¬Łz:&şCbBśĎú°ÁDE âBCH±¸bA†–mÜg.ŹĘýüĺěŚë@7łuRĆ{´łä¸™P6ÇC"™ő|ĘxŹ™\–¤"Ë4çÎ&™şb¤±‘g|lŚ2~¬/›nhÁšíú`×Ę™:t…ěŐŰÍ¤Ź‘HN™ŕi]š-!NIžQ*;í őŮŮYyv VÎiäŁqCďÖ!ĆżÎ,SbÝâVűU%§­í˘y3';bđ×ŰíÇü=‡T\vNŢŇ(ä$Ď(ĺ-ÉTvŰrWn}ŮÔ}3ü—Ź1H)›şéwµ,‹eçňŻwř^<ęlč ĎImEa4LB™ÚTßآW·[:‚i Z» Ć|Ä–i€]9J#ĺ•5AGWěÎ4d:łĽOą ^nŮ«·+{ővǤ`g$}Šąľ^qŮYź]¸ł‚Ś&ńŮĽĄim%GŰG4ş¬c\ÜÁ=˘;¤8ltfwŹ›ńâz-۸·Ă¬ VÍ7ăń•‚bŰŽŮčŘłŢí(ň÷Ň÷s7ë•‚"WËgDE†)˝mŁÎ˘#gÍ·v"­÷€ÂâöŽŞÝgĽ¦ŁMë[,Ó˛»¦^_›ö–2ý‚5©”·$ÓqÄ˝SiÄn°¬Öxł˛ÖÓ;¦ËÖn-RöęíćňZč]óf¶·çr_ßň=ż°řyoLďqLökĎřŚ|q}}kű0Řűú×aÖ÷–€=zÚ«§–mÖôÖ‡Ľˇm0S“}ęÖúu–äo¨Ë1Z?_GmČöÍ;“]}–˘#íĺ,±l׾·¶AŢ ˛rą÷˛í5č¦˙đýÜÍ­ł8ŠŹŕ]Śä2€>É:=üágÖtŘyĚÝŘŢŕ™7s˛Ů‰Š 3ź–ď,9îř>ĺŢËć& Öé{ŇÚ­EŽŤ َcm„ř6ĽŽŻíĚ”ŕ$Ď(ł‘¶ëwÇ“e§˝*,9¦BËÓjŔčqg$«†D ş¶ťőÁҫۊŻÝÂâcć5™âhăzvę—ťööĘ5lÍł`Ťóîßů{JŰFž–vŰÚî¸=ËcőĄÓýÜŮňĘÇNW}c‹^)(nY|Ě1aŕ»FRř˝3^3 eíôZwrĎ}}ź¤ÖQHţŁČ¬ë/ÇuTŻf¤vÝşšÖ„ý‚¶ŃŘ@WU[gîŤŕĎX⪪¶NË6îµ=&wă>Ç{×Ĺeç‚¶Ĺ_)(RaÉ16‰î#˘"ĂĚDa}c‹žÎÝÜaRµ°řąlĂA*X5ßńŘ]ż;n{˙´˛«ě÷řŞÚ:ÇDm}c‹r_ßP‡%yFů´?ťٱC=§PÚĘF]˛lăľž­1Ň^.Ö‡łÖ¨ţłqŤMu%}płvk‘ů}teťimk;m¤g +;íµ-ă»»Őţ1‹ĂS@Ü:’Ëú¤Ś©ă̤K}c‹zfŤ˛Wo÷©Ś„°ç‰ĺćT?»uKg¦™ꌅyŁŚĘN{5cažŮZ‘ťŃ+źyÓŢŇ€{Ń‘łf-1!ÎlÜ·w°“ÍϰiŹďnö›ö”*{ÍvÇš­ŠËÎęčß ß(ÇúĆ=üĚš€'Ýe§˝z:włYn/÷Rąˇo2‰ĺ•5Ú´÷°ŮîqÝUŐÖiĆ‹ë:Ćum4¬C]WĎčŘVŐÖ){µo2¨¸ě¬y-÷´Ń±1ĘÉloPűćÖd^‘ŮHďaJ1:ŹÓ;ůµxN{]™˝&pä “Fěř×µÖ‘CĆŇţI#)\nٹ޿žb•l}M°¸ů~îfź6âĆH¤Ś÷té¦MŁcc|’mN‡ľÂ:íüŐ‚bۤĎ_-Ě ú`Čhsćď)Ő÷s7ë诪jëtôLkÝUdTrŢ’ŮflÎXgۦ4Úâ’hSö±>™qż-ݬŃCϬѲŤ{}îď­›6źŐŚ×›m4Łžčh‰3»vŹőgˇîG±xNšyť/۸ϱkŚ4űýůýÜÍç”˝z»Yw9]ź»JŽŰÖ}N˘"Ă|>_öšíšńâzíl{ă|ŠËÎ*{őv=ôĚÇ:Ř:0Şřč9ź~]TdYg•WÖ|>I>mÍÄ„8WK ř·ě@í,9ĐÖ¶ŢoĎ™nÉ^ł]Ë6î5ď1Ƶl¦őű{ř™5Çî,9n&§‡D 2ű)č:¬ą  ĎÚ¶ę9 ‰ł¬ż|,hĂ51!N‡¶äŚ:66?X¶qŻŮ`1¦ßTvݬ\‡D RŢ’Ě.yÝC")O©ň÷”*u‚ÇçÜâGDkŰĘŔ'˙+˛3tô´W_6ÝĐŇŤ{µtă^ŤŽŤ řLŮ#"ŤŤ×Ę+k4ý…Ö \ĽÖitŰšx+ç›kd~?wł˘"Ă””çsn’ôf^ë[! 
ůblc\+ˇ4ä2¦ŽSŢ’ŮZ¶qźĘ+käybą’âćł|Lüh˝ą~QČł ĎIÓÎ’cú˛é†y/IJóŮxÄř»=mĂŇLUÖ:b¬ě´Wž'–›÷(ëgNLÓ›y‹¸¸Đ)«ć롹«őeÓ ÇNîˇ-ąćNôk·iíÖ"ĄNđ´.?aŮź“™¬µ&ŻŚdkĘxŹ˝‘k{_đďŚútP Ú˙í4;aĂŇL}ŮÔ˘ť%ÇUtä¬ŠŽś5ĽôDܬXˇ˘˛łŞ¨¬Ń¦˝ĄĘ:Ž?賲ҧ(o©**kTXrLGĎx͵¶é¬ő`umťdষ­zNÓ_\ŻŠĘ3ćüŰá’l7ëJňŚ hSJ hďJ­›ţѦě[6,ÍTRBśŮź0ú+NâGD«`ŐsŢŰÚaž'–ŰÖ5yKf‡ü`0*2Lo®_P‡#z­ď;oćdsFʵý™“™¦M{Kęk˘rŢĚÉ>‰WëaŁ}é_÷u›’̲-;í šD5úvvĺbôm­ńůŐ©mfťUýYťOťi׾NLÓ›ëÝ×™F;»úłëĘßSŞâ˛s÷!Ě26ľë5b´A**klݱy3'«şöşíčgë÷głqŹ˘1ŚÎ˛.Źá$É3J•EżĐĽ™“ÍQĚÖ%câGD«`ĺ|ŰŤ‡FÇƨ`ĺ|źŮ2vë;[cĐn4uęŹůC"Ťóm«žó‰ŁS^ßآřŃz9;#hŰŕVYú˛<ú:»v°Ń¦›7s˛ĽÖůŚ46â5DE†éÔîU*X9ß'v[g ¶µĂ#ścÍżMéßŢMLÓ;˝Ü‡‚~Ţë|ę‰ q*X9_'wŻ ©o·$S/gghHÄ €şćÍő‹flvÄ®+ݬńy߼%ł×ް4So®_P§{mTdX@ÝwôŚ·ËËÖh žÜ˝Ę1F¬çn¶u-ٍýëLkűÚ¨3mÉíÔĂ#ąoť)čŹ9ą{•6,Í4?Ł˙ěĆCîĽ%ł}Öj7®«m«ž3ËvÍ&ŹFŢŔx­Ńomź •¬C[r{m Ůí7oޤस¸X%%%’$Ď#Ł5vŇ Ç_Éx˛\U[g&xÝś›őunËĹ(“`Ż/•[OY\pŇü˙7ŢxxěÖëÚHÁqÔd}c‹*>jMZGҦĽ=ue2ŇÍő`]­;űŤť©SşŞşŐ˛ Ą<»łÎěĚ{/ݰWwÜ|Źëň9Q‘hô1,‹lEE†©°äąf˛ťť%ÇĚß‘8Fw©0—¸ qx;©ţ¬Nů{J•űú>Ç{ڱ!źdżŮ/z#—€­ŚÔds)‚‡źYŁ—ł3­QߌÖĺ?^×Îâăć¦Űé©É!o„ÂoÝĚ->6š‚ą­î1ăT\vNUµuz:włĎIÓ¨o¶~Ç—˙ŘşI ±ĚINfŁ’ű ’ËŔV’g” VÎWöšíŞŞ­Óó«˙Éö¸ôÔdÇÍŇ€ÎĘßSj>Ü0Ě›ÉĆŹ·“¬ô):zĆ«ť%Ç}6Rô7oćdŰM…ŃűH.@/Šíłk6Đ×dĄOQĘŹ^)(RyeŤąűŃ_7jX¤ćM»ßöwŐWµëÝuôüeľvP˙ňę,  ‡ä2€[¶»­óś“‘¤W÷śŇîw˝!uvď‰ŃżţüI‡ĆAf˙ü *>©Ó ˙®Rx ŹńH<ťTňÁ'’¤ż{ţ/ő_ş_»ßő†”ĚŠ©źÎyČń÷Ź>«ţţ]=EĹď_˛ťQ€xngF¸ĺ—®©üRť†„ߥEéI~—Žžż˘ňK×né}ă‡ÖżĽ:KCÂď’$·5@ŹńH<ně<|QőÍ_)ńžĄ<8RنEšł nŐĽicÍ™źÔQŘń 8$—ÜcJSúĂcq·Ňăóó[q·Ů8¨ľÚ@aÄ#@<Ź€k»ßőJ’ži›NźÓ¶yńó[e<ě©oúŠÂG`Ŕ!ą ŕ–SšŚĆAú#÷?ďŠ ˝ˇĺkł# €xGâpŁújŽžżŇ‡ßăóß®M I—?o$â°H.č4cJÓ¨a‘ćŽô鏌1§6pé–ŢßR,µnŢ€xGâpĂŘXsćĂ÷ë“ǬGż+éÖgĽşç¤ŞŰ’Y3Ű’dG` aC?ťfL]ňß±wŢ´űőęžSúeqE§v±ŻľÚ ÷ţP«W÷ś”Ô:µÉN €xGâծíÉ,cö€á™i÷ë˝?Ôšł śF9^ţĽŃŚ9%|b>č™ůđ=J3”G`Ŕ!ą  S¬Sšžů®oçů™ď¶vžË/Őéčů+ć¨-ďýˇVa›ţť!áwé_^ťĹ´&€xGâpĄřýKŞţĽQCÂď x 3oÚXýhŰż›ł śřTިW÷ś úwĄ'ę§s˘ŔâH.čcJӣߎ5§4Ś©MďýˇV»čŘyćŃoÇ*ńžĺd$Ľ?â ‰G #Ĺď·®}~·íhǨđ»őeó×AgŚ0 Áú»”oŹ$âĐH.č\çąmJSGŁ«v˝űˇţîůż´Yőč·cőŻ?’ÂG€x$.Ußô•ů°§ŁŃŽÁfÄ‹d$@<‚ä2׌)MFŘIĹ'uú˛ůkíz÷Cĺd$QpńŹÄ#Đ#ŚDÖ¨a‘Úúß§9·ŕďëň獝žM€x:’Ë:ŃynťŇôĚwď×Öż Ň8ř‡µëÝµą¨śÎ3@<Ä#ńôÝmɬô‡ď š¤ĘÉHŇŹ¶ý{ĐŮGÎţŚ"ŕ†uJ“˙.żţži[«úóFss#Ä#@<Ź@w*żtMĺ—ę$©Ă8ÖŤ7Ť@<Ée®•üđ»”ţȠǦ<8RنEJ’6•SxńŹÄ#Đí6UH’ď‰épsݍ»5óá{Ú^G<Ä#·H.pĄ}JÓŽ7žN—|đ‰ŞŻ6P€ńŹÄ#Đ­J>h[˘fÚý!?ŹŮń ÓXs@ȬSše$†ôšô‡ďŃŹ¶ý»¤ÖQ]ěî ŹńH<ÝĄúj‡Ö)öAăń‘1ú霉’¤/›ż’$=úí‘ú陳 :ĂxĎř[x€x$ľŽä2€%ŤŞ–˘E®^?|pŔk~:çˇ[îD»=€x$â¸ýĹÜ©8ňMĘ#n<Ö™÷G⸱,Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.ŕ˙gďţă¬ď<Ád›+6’ĺ-lu¶c9 E5»- W,×ün†µ $łŞFr {S$ˇăTČíĆkyKČli,WH\·3)ZsµŮ'fÍY°<ň΄Y$2[Y±R°ť:%V¤ÝâyčnuËjYňéőŞJĹýôÓĎóô÷ŃCwżűÓź/@ل˔M¸ \§FÇ˙\±çŮ3±iíÂX|ŮĹ1&p!<őň{Ĺëc®_ź‰Řý_ŢŽŠ _ţŕzČ%\†y¦ďäXôť|÷˘:¦ź/~rÜąÁőčŘŔßü™÷"˝rF€ˇŠŠŠs}@żE‡y`ůňĺÝ1]uEE\uţě·¶¶Ö óęz›pmşq=ž˙ë/÷ÚK,ŻŹĚ'™L&jjj.úăĽĐÁrMMMd20¸/®G§rćÉ›ĘĘʸ űŢŢŢĽĐ÷n\ß>ňnĽ‘3X}}ýű»|ůroó×c®žžžLoß~ÝÂřÔęŠ ×ĺ§?ýé¸âŠ+\ʏgČččh>|8Îś9^q÷Ť‹˘ď˙ďí1`­\ą2n¸á† >N®G·ÚÚÚřć7żyQËÓO?ű÷ďOo˙Ń ‹âďŢŤ<1ţÚ¸xńâŘąsçEYŔsízLŚŚŚÄľ}ű""âž{î‰ĘĘJ' .Âe*++#“É\†ÝÝÝńř㏧·“`yńeăU÷n\dőööF&“‰††'×ă,:xđ`^°|ăę±yÝ‚˘×ĺŹüăرc‡ĘE\Ź3ôḽ˝=/Xľ·aQÔ,Žt‚Ű$`>~üxś˛öďßďu.0á20cžĎ5XžěĂ´€¦˙ˇ9›ÍćË÷6˙©ďd>U» /`>räź"B™ŻŤ…“g–óšh‚Mgëł\ŠţË0»rßż^uEEŢł·^“˙YŃ{R¸°„ËŔŚx>—`ąđĂ´€¦odd$ÚŰŰÓ Ä’`ąŘbSń©Úy=`Ź9˘R J8zôč„×Ć;®›Ţ[ďblfłYíiŕMµĎr)ú/Ăě]›…í0 ?[Ţ•ůŕ‹×ÁÁÁĽ*gŕü.çd6‚ĺĽÓf¶Ü`9"âŽëN;XÎ}#ź0?óĚ3®I(0000ágöçúÚX8Á¦ţçpî×i9}–'{],ěżěş„s»6sâ›7,(úţőę‚jćÇÜŻzŕ.Ó6›ÁrBŔ Ó“Ífó‚ĺĎ×/śŇbSý ť[©ĺš„üĹą=Η-Ž{mĽúŠŠ¸ăşüţçą!605Óéł<Ů{ŐÂţËć%€s{[ŞFˇÍëÄúšüÇçźp–ó,çľi0CyoĘs'›É`9qWfâ5yôčQĎĽV¬Çy9“gNEa˙ó—^zɇi(Ótű,—RŘůČ‘#ާÂ4ŐÄ›PžÜ/I××Ĥí0Š˝&ć¶ÇčěětíÁy$\¦¬0XŽŘ°Ľ"ţ¶,žzů˝Ľ˙őť|ďś÷×wň˝ ŰýŰţ±Ř°<˙C€€&^ź_Q1«Ár"é5™üqtt4öíŰç =óîqaŹó™ ¬J)ś`s˙ţý^ˇ„™ěł<Ůkâ]™…y?Ń×Î.·ƇŤ·ş(WîűŃ“'OćM Ě®E†ŞboŽżV:DţćÍÓ®h}g,:ňnYÇvÍ5×ÄňĺËť(杣GŹNřEÁů–W_Q÷6,Šowź‰_źCßŢŢ;věĘĘJ'9­°ÇůUK#ŢüuÄS/çż>^µ4â+§_×1úÎXü㉱85šżüź|d⮕••‘Édś(¸6f˛Ďňd݉w\·0ľß;ţ>öČ‘#ń±Ź},ś(ń>6÷—w·^łpZ­jĆżÜYß{ţLDŚ·ÇČd2qÍ5×deÂe`Öüě—cQ·ĽbÚŹÎn`` Ż+"âŤ_ŽĹ˙ţÔ™ 
ë~xŃřĎËů™aˇď=&~rüě×çŕŕ`´··ÇÎť;ť$ćôâÜ`9"âŤÓo-ţĺčżýÍÓîíú÷cńčźÚ—®Ůl6ľńŤořrŢwđŕÁYéł\ʧjÄË'ÇâÇă_2íßż?jkkٶ¶ÖÉ€###yëkĆß«N×'VVÄÇWTÄ?žŻÚŮŮ_˙ú×˝Â,.S¶cÇŽĽ7ćĹtwwÇ©S§ftż555g­öČd2Ş–™—zzzbtttJëţúĚřŻ ¦.÷ť|oJÁrbpp0Ž=Şb„9«Ü«§FŢ‹šĹÓűĐüâ±w 8LCaUäÍś—¶5w\· ~ö˱xă—ci˙eżč|ĚűEÁtÚaş+ł0v?ýÁŻé:;;ăž{î1Ř0‹„ËŔ”MĄââčŃŁ3./_ľ•ËĚŞ‘‘‘Čfłé폯¨Ő 6ë–/OŻý`űąÓŔĚQą Ŕ¬ęěěĚëţŹ'ƢőńwňÖąjiÄÝźş¬ěÉý¦2ąćččhdłŮرc‡“3Hĺ2łŞ§§ç¬ëĽq:â'Çß+{Ű?9>µÉ5_zéĄp2` —UőőőSZoäí±˛·}rdjŹŮ°aCÔÖÖ:0´Ĺ`V}á _(yßÁăńÇź‘ýüŢďý^ÜvŰmΕ˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @Ů[844˝˝˝±yóćłnäđáĂQ__ŐŐŐłr É1UUUE&“ɻݫ«+=†ęęęŘşukTWW—\˙BJŽs*ă p±*.8p ¶mŰ?üp´´´”Ü@WWWlٲ%""ĆĆĆÎů€†††bűöíqß}÷ĺ…±{÷îhnnŽl6›.okk‹Ý»wçmcÍš5ŃÓÓStýóeďŢ˝166­­­é˛ţţţhllś±±¸PжĹčééI˙]Ü–Z·ľľţś¦§§'Ö­[Ź>úč„jăd?…Ë÷îÝÍÍÍń裏Ʈ]»"“É”\˙|زeK´¶¶–|Ş–€K]ŃĘĺ$]łfMô÷÷GWWWZq[jÝR÷—٧§'†††Š†Ż(ą~UUUZťÜÔÔTrýóĄ««+"&ŰMMM*–€9ˇhĺrŇ8ié0YőrŇ›y&*„Ë ŞK…¸RrLkÖ¬™µţÓÚ„Ęĺ$Ť—ŰÚÚ˘««+úűűcíÚµyë MÚ~˘««+{ě±tťęęęhjjŠććć ë>|8{챬“~Ék×®Ťžžžxě±ÇbÍš5ŃŇŇ’®źkEEEěŢ˝;˝żpýÂcîě쌮®®ŠńŠâűč%ëçVBg2™¸ďľűňĆŁżż?:;;ÓçZxL„ô»víš°źdÉ1­]»6š››‹íÉľęë룩©)úűűc÷îÝŃßßÖçÓŐŐťťťéşĄÎ Ŕd&„Ë…}[ZZbďŢ˝ŃÖÖ6abĽÜŢĚ…áňöíŰŁŁŁ#""ŞŞŞ˘şş:^ýő8pŕ@tttġC‡ŇĘŢŽŽŽ4XŽoiqŕŔŘşukz{÷îÝi`ÚÖÖ–VWGڦ]]]±uëÖhii™°~îńnٲ% psźÍfóŽ)""›Í¦®ßŃŃ{öěI«»{zz˘­­-]§żż?ÚÚÚŇ 9ążŞŞ*/\Š-[¶äµ"JŹi×®]yŰMĆ#Ůvooď„ű»şşâµ×^KÇ?ŃÚÚšö¨®ŻŻŹţţţřĹ/~®®®xřá‡]Ŕ”Lh‹‘„śIUnžvvv e#&NP·m۶ččč5kÖġC‡bhh(úűűă…^úúú AěâСC1DŹŤŤĹŘŘXX¶żčęęĘë]üć›oĆŘŘXZ]\¬]Fn°Ľk×®ô1ĄŽ©««+ –÷ěŮ“®?66{öě‰ń=·Zxll,·C‡ĹŘŘXşÍbŢąÁňćÍ›ăµ×^‹ţţţŠG}4ŞŞŞb÷îÝyŐäąŰzě±ÇbĎž=ńđĂO8¶$DÎ}ĚŢ˝{Łľľ>^{íµ´_őˇC‡ŇžŐ…_”2!\Nz('íÖ®]›VV L“ŠŰŞŞŞ8pŕ@^[‡L&“nŁXř™»ßbÇT¸źâ˝Ť‹­ż}űöŠűî»/ÚÚÚŇÇd2™4έ†ÎmaŃÚÚš·ŹÖÖÖ4POÂĺHCôbĎ#9ŢÜĺmmmi°ÜŐŐ•×fŁ©©) ö Ç=9Î$$Îmý‘<¦PĽ755ĺí§±±1}La PJ^¸\އr>Â…Atġl[[[Ń>ĚŤŤŤQUUůaf©ŢÍI…mUUUŃpy*ë÷ôôDWWWTUUMh!‘lcĎž=iŐoÄxĺńkŻ˝V2¬}ýő×',KžC}}ý„ű ďţţţt÷Ňwîq%Ď/©$®ŻŻź;…Ű©ŞŞ*:ĆĄĆĄĄĄ%˛ŮlôööĆ–-[˘±±1ZZZ˘ąą9Ş««‹VŚ”’Wą\އrD¤U­ťťť‘r&áhnĺn±7QXő[,¨N”j—Q¬jşÔú“µÜ(eűöí±lٲ´mD[[[´µµĹŁŹ>šö{.lÉQ*\.ÖÂ#YÖÔÔTňr'ů+ÜV©çRęąVWWGWWW477§Űiii‰uëÖi‡”-/\.Uő1‚VUUĄU»Ą&Í‹Iĺţţţ }‰' ĄKM„WŞ·q±ő“0»T…pˇÖÖÖčč說ŞŘłgOÚ"cll,úűűÓ}–x>‡łSá䊹ËJ…ËĹúM'Ş««#›ÍĆkŻ˝»víŠŞŞŞčďďŹ-[¶€˛ä…Ëg &“6 V5pÄ-*’I#&o'‘´Ë(bëm\lýÜI÷ ĹöíŰc÷îÝihťôBîęęŠÖÖÖhllĚ x“Č…űHă©¶đHö_JR%ž[Ý<Ů9ĘÝWr^úűűc÷îÝyý˛×®]mmmŃßßźŽˇ~Ë@9ŇpąXEqˇ¤/qgggZ \Ř›9˘řdwÉ>’3·_đŮú'ç¶Ţ|2żbë'j± ·ŁŁ#:::âСCQ]]ťD pKő[Nö]¬ĄH±~Ň…­D %~noĺłťŁbí7úűűŁ­­­dOéŽŽŽ’cPJ.— fs­]»6¶nÝ===E+—ÓV Ih™ŠŰoż=†††bëÖ­SŞĆ-Ő_řl˝Ť ×Oö•[˝›<çdYRą[,0ÎÝţí·ßž÷|K­›űĽ‹ÂI`ÜŮŮYt?۶mKŹ+ Š'«Řνż0𯪪ŠpN">X°T%4@1Âĺł…Śą°…ÚE|Đż.Z  IDATöbűöí±eË–Ř˝{wl۶-Ö­[===±yóćtťD2A޶mŰňú˙–:¦rz'Çśô‹ľţúëÓcşţúëchh(~řát[ŐŐŐiËŽ-[¶¤-3nżýöزeK^;Źbz{{cË–-é8• „ÓÉő®żţúضm[Ţ~†††b×®]yă}¶sTęţ$TÎ='»wďŽ-[¶ÄŢ˝{ŁŞŞJ[  ,‹’”Şú-ÔŘŘkÖ¬‰×_˝äćŠÖÖÖčęęĘkŐ°gĎžĽv‰ŽŽŽhiiIŰ>ěŮł'"Їȓő6.:WWWGOOO´´´ÄáÇóßŽŽŽ ëgłŮhkk‹˝{÷¦Ál}}}<účŁŃÔÔ_üâŃÓÓ“Cň|>]]]i{ŚÉúIgłŮČd2ŃŃŃ‘¸oŢĽ9ÚÚÚJVl—:GĹÚoDŚWI'}–sĎIUUU477G[[Ű”';(.OĹdäEڇ›I€›Ŕ–ŇÔÔT´çď /Ľ0aŮÚµkÓJ穬źű¸ä9vuuM˘'˝;::бc­®®.:†mmm“V·¶¶FkkkÚ>c˛±:Ű9zíµ×&='Éă“ó'P¦kŃlďŕběĺ{¶ęě qüŐŐŐçm_Beŕ\-0”K¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔m‘!¦jdd$']gtttĆ÷;::/˝ôҤë¬^˝:*++ť$€óD¸ LÉČČHüëýŻg%<>›ÁÁÁhooźtťĺË—Ç7ľń ' ŕ<Ń’ŃŃѲ嫯¨öţj*ËűĎÓÉ“'ť$€óHĺ20%Ë—/ŹćććčěěĚ[~Ő±xŃŘ„ő˙yíÂX|Ů9„Ë‹#nżnaĽxěÝ ÷Ťž©7~™żĎććf' ŕ<.SÖĐĐ‘0ŹŤĹÝ7.:§ ą”ÍëÄćuůĚŁďŚĹ·»Ďä-knnNŹ €óC[  , yUÂośŽřv÷™}glÖ÷ťËośţ`™`ŕÂ.e»ł`ŕâ"\¦ĺ|Ě‚e€‹Źp¶ó0 –.NÂeŕśĚfŔ,X¸x —s6ł`ŕâ&\fÄLĚ‚e€‹źp130 –. 
Âe`FťKŔ,X¸t—7ť€Y° pi.ł˘ś€Y° pé.łf*ł`ŕŇ´ČŔĚ8ú_űăčí7’¸łł3">ďm˙ĎŹ`ŕŇ$\f]©€9ůwB° péĐÎA&“‰Ĺ‹—ý¸Ĺ‹G&“™WcU¬E†`ŕŇĄrÎAmmmttt)*¬`N–™ëFFFâäÉ“Q[[k03„ËŔyU0 –™ë˘˝˝=FGGăž{î™wżZ`î.ç]CCC,_ľ<""®ąćÂśŐÓÓŮl6FGG#"˘»»[¸ Ŕś!\.ˇ2s]ww÷„0###€9Ă„~0ĂŠË0ר\€”ÍfăČ‘#éí/ŠřőăŔÜŁrfHa°|ŐqWĆ÷¸ĚM>ńŔ9‰ÎÎÎčééI—ݸzAÜqÝ‚řŮ/Ç s’pÎÁČČH´··Çŕŕ`şěĆŐ â®ĚÂ÷o —›„ËP đgíŔܰ|ůň¸çž{˘¶¶vƶY,XţôÚŠ¸ăă 8sžp –an:yňdôôôĚX¸<00űöí‹“'O¦Ë>_ż0>U[z:ÁÁÁhoow2„ËP¦hooŹŃŃŃtŮŮ‚ĺŃŃŃx饗 s‚p&‘ů‡˙fŕwlŐŞ8qŐŞŰŢŃŁGcßľ}i°üáEw߸0ę––k*DÄ»Íx||E…? f„p¦¨»»;:;;ÓŰ^qoâ¸úŠŇmÍâű›—Ĺ©‘÷.ŠçP*€r —` ĺe‹#îľqň`9Qł8˘f±P€ąE¸ g±˙ţxúé§ÓŰW]Q÷n\‹/Ób€ůK¸ “ČfłqäČ‘ô¶`Ć — ‘‘‘Řż^°|ăęqÇu ËÂe`dd$ÚŰŰcpp0]văęqWfˇÁ€÷ — ‡`™sŃÓÓŰ·o/y&“‰ęęęČd2±uëÖÝg}}}ttt¤Ë·lŮ?üp¬]»6"ĆŰĽtvvFsss´´´8aŔ9.@ާź~:/Xľýş…±yÝĂ”tuuEWWפ÷'šššâŃG=ç}8p şşşbÍš5鲞žžt_I°1.>|8š››ť,ŕś — GmmmŢí'ŽľuË+âę+ôYćězzz""bëÖ­ŃÚÚZtťÄŢ˝{ăŔ‘ÍfĎą‚8 ‘3™Ě„ăŘĽysŢşŐŐŐ±yóćĽu¦K¸ 92™L477GgggDDüúLÄ·»ĎÄ˝ ‹ĚśŐáÇ#"˘±±1‹®ÓŘŘCCCŃŮŮ9#ároooú·›HÂĺÂůŔN0cüÎ 444Ä׿ţőXĽxqD|0˙äřÁˇ¤ˇˇˇčďďŹ8kepr Óßßź†Ő“­344‘f'ár©€űlr·;•çťě_„ËPDmmměر#/`ţŢógâďŢ38•°ž-ÔM‚ŰúúúĽĺýýý±m۶X¶lY¬[·.Ł˘˘"¶mŰV4ěMöY¸ť$”Î ą[[[Ł˘˘"ÚÚÚň_QQ‘ďîÝ»cÝşu±nÝşôJÇ믿>–-[–ţ2™aKKKTTT¨”€9N[ (! łŮl:Éß÷{ߍOŐú~–|Iďă ·¤íJa+‹-[¶ÄĐĐPÔ××Ç}÷Ýă“đełŮčęęŠ^x!Ş««ó‘f'ÇQUU•7™_±VąË¶lŮ/ĽđBÚ&›ÍFÜ~űíńÚkŻĺ˙îݻӺąą9Ş««Ł««+:::bll¬h«`î.Ŕ$’€ą˝˝=/`~ůä{qWĆË(J+ЎˇˇŘľ}{Ú>#™ôŻżż? –wíÚ•W]ÜÚÚMMMqřđáhmmŤl6›Ţ7Ůd~…Án±jćdÝ˝{÷F}}}ôôô¤tkkk,[¶,úűűŁżż?]~ŕŔhkk‹ŞŞŞčęęĘŰ^KKKěÝ»7"&†ŰŔÜăS1śEeeĺ„€ůÇcqFŔL* o{{{c÷îÝîďď®®4X~řá‡Óđµ­­-†††bëÖ­yÁrDDuuu´µµĹ–-[â±Ç+şĎbq±Ě…oňř5kÖDWWW^UtîżCCC±m۶™ ěŽŽŽ´*{şýž€K‡OÄ0IŔĽ˙ţ8räHD|0ßqÝÂX|YEÉÇ~ďy“ÎuąŕuuuĄĹŬYł&:::˘©©)}lČvtt}LÔćö]Îí…śň&-)еĘ( |“m´µµM““<"Ň@:›Í¦m;Š…ÇŐŐŐ±yóć8|ř°–0—`Š*++ŁĄĄ%""/`ţŮ/ÎÄ˝ ‹ŠĚ}'ß›“ÁreeĄ?ąAď®]»Š®łvíÚČd2B×Ü^Íĺ´‘HöąyóćtŮĐĐĐY{+î·ŞŞ*ý».v\ąŰO&č+¶~!•Ë0÷ — L---±xńâxć™g""âŤÓßî.0ĎE ţrä˝…m-Î&©ž,ŚÍ­".Üg±ök֬ɫD.VÍ|¶ŃĹé×_}²B&ó€ůC¸ Óđűż˙űQ[[›¶3xătÄżűŰ3ńG7.Š«Ż(0oذ!ľň•ŻĽ9¨Tۉ©HBÜb=ŽIĹp}}ý„Ç«F. v§ZÍ|¶Ç ą ŹshhhB¸ ĚM LOCCC477§·OŤŽW0˙ě—ú+Ď7Ĺ&Ö›Şä1ąý” íÝ»7"ňŰQL6™_±Ŕy*ŐĚĹžSîýkÖ¬™ôX“ăTµ ópÎACCC|ůË_ŽĹ‹GDÄŻĎŚĚ}'ß38óD©‰ő¦*éłüŘcŹ˝ż­­-úűűcÍš5i¸śŰ/9·Oód“ůĺ[©ŢĚ…Ď©pűÉv“9W6›=§ nŕŇ#\€stÍ5×ÄŽ;ň懎Ľ? `žJ±SŐÔÔUUUŃßß۶mK«‚‡††b÷îݱ{÷îčččH+Ź‹ĂCCCEű7—Ó›ąđţÂŕ9é'ÝŐŐ·ß~{>|8>۶m‹m۶Ąë©\€ůA¸ 3 ¶¶6věŘ555é˛ď÷ľ'`žóÎÖ»řlŞ««Ł««+ŞŞŞ"›ÍƲeËâú믏eË–E[[[TUUšC‡˘©©iÂ>‹U(çöeŽ(>Á^©ŢĚ“m?bĽĘúá‡ŽŞŞŞ8pŕ@466Fccc<ú裱k×®t=•Ë0?ĐfHmmměÜą3ÚŰŰcpp0""~<¨˙ň\—Édb×®]ç¨f2™čďďŹl6›NŢ·uëÖhjjЦ¦¦ ŐĹŤŤŤ±víÚĽĚŐŐŐ±k×® ŐÓI_đÜă;Ű1'÷çÚ‰–––hllL'ď[»vm455ťtŰ„Ë0*++cÇŽy3s[nŔ{.Ş««Łµµ5Z[[§µĎ¤Š¸PŇĘ"WZ—łýţţţxýő×ŁŞŞ*2™Ě„ăLÂeUË0h‹3, Up2—dłŮhll,~÷÷÷§N%ćá2Ě‚ĘĘĘřÂľ7nL—ŐÖÖ.YIEňáÇcűöíéd~Ű·oŹëŻż>""î»ďľiMj\š´Ĺ€YÔŇŇűŘÇbtt4®ĽňʉĘĘJĂ%§±±1~řáضm[tttDGGGŢý÷Ýwß„eŔÜ&\€Y–ÉdŇĚűŘÇbÇŽ…KR2™_WWWô÷÷GÄxŻč¦¦&Ë0 —`ŤŚŚäMî÷ŇK/.ik×®ť±I €K›pfÉŔŔ@´··Çčč¨Á`Î1ˇĚÁ2sťĘeaÝÝݱ˙ţ4Xţđ˘_ź1.Ě-*—`uwwGggg^°|oďr{|Ú€ňřăŹÇÁÓŰW]Qw߸(jćá2Ě€l6GŽIo_uEEÜ»qa,ľĚŘ07 —ŕËëk"îľqa,ľ¬˘čúŹ?ţ¸AyâÔ©S€9K¸ Ó422ííí188.»qő‚¸+łpŇÇĺ¶Î€K• ý`Ę –Żş˘Â Áťťť10˙»ż=t㢸úŠ ŔĽ$\†KD励"“™Ňş+2u·ŢźlnŽĂ;wĆ©ľľY?ŽÉŽ/Y~Ľ§×‰Ľ\{çgâÍÍń·;˙­p9GCCCDD0źŻ`ľ·AŔ Ŕü$\†KĐ+O>ĂÇO˝oÉĘQ»iS|hÉ’X˛jeüÖž?ŹÇ>WĽuú´ă¬Vf2qĂ—ľd JhhhĘĘĘČfł1::ż>30ß•YźX)``~.Ă%čŐ'źŠă==%ďż|éwcă×îŹÚ›nŠË—.ŤO47Çó=tÁŽ÷˙jÜâ¤1gd2™Ř±cG´··§ó÷ž?źŻ_źŞ-=•Á©Ńź/~}Ćĺ{sÔpń.ĂôÖéÓqäÁoEíÁ˙µ7ÝtAĂekjkkcÇŽńÝď~7Nť:ßď}7"˘dŔü—˙NĽáĚ!ÂeŁŢ:}:NôöĆŠúúX˛jeÉőjęę˘vÓMéí·‡‡cŕąîíµ»˘ľ>""~őóźçm7Y>ôę«ńÖéÓqůŇĄ±ú¦›bÉĘeKá㆏źÁçž‹·NźŽ%«VĹG®Ľ2ŢůŐŻ¦Ý{zE}}¬Ľ>żźtî>Îvl˝ůwâCK–Lx\M]]\ö‘ŹL—\KV­?‡ď?·gź+ů\._ş4Ş?úŃ8ŃŰ[ôźzą/ž{®čă–Ő­O—-«[cccEÇ®Řß͉žŢíď}±«­­Ťť;wF{{{ FÄxŔüňÉ÷â®ĚÄ—WÁ20S*++ á2ĚS5uuqĂ—ľXtľľôĄč{âÉřoßýîŚôjţť˝Ń›íŚłŮ Ë˙¦u{,Yµ*ţéî‰Ë—.ťp,/fłŃ›í,şíş[o-ú¸·NźŽ˙öÝ}QąbEÔ·4ljŢŢřŃ}­ewí¦MqĂżX2śëôé8úČ#%Źí†/~1>zËÍEŹíČ·ţ,®ýěť±˘ľ~¸Dڇ˝źřĂ?Śk?{ç„í~˛Ą%NôôÄó}gB»lýúříŽ=±˙¶ď˙jÔnÚ4aĂÇŽçMöű¸Ü±ŹĽ±»|éŇŘüŔź”śĽ±ÔqÍU•••i‹Ś$`ţńŕXDś)0'~ď÷~Ď„€iYľ|yÔÖÖ. 
ÂeĂ’Ęŕ·‡‡ó–×ÔŐĹoíůó4ôxîą8őňxřżlÚËęÖGÝ­·D͆şř/Űż<ë“®»ů樻ő–2‡ŹK+v#ĆĂÔ_ťřyô=ńDŢă>ŮŇő-Íés<Ńۧ^î‹•×gbE}}lĽ˙«Ó9k7mŠĆ?} ÝöŔłĎ¦“(&ctůŇĄ%Źmăý÷§Ďiřřń8ŃÓo˙*Vf2±¬n}4ţ铎ëoíů󨩫K?đěsńöđpÔl¨‹Ú›nŠ™LüÖž?Ź˙÷űă’UĎÉ6†ŹŹ7_y%Ţ>}:Vd2±dĺĘ “=&ă÷ˇŹ,I«—ßě{%ŢţŐpú·qůŇĄyÇ•;±d2ćÉqͧI$+++cçÎť‘ÍfăČ‘#10ż9z&ţ膅±ř˛‰ýÝvŰmţŔ%O¸ sTRu1đěłéż“€đňĄKăíáá8üőťy“ľÍƵź˝3nřâ٦®.ţŮżÝ~kVʵîÖ[âÍľW˘kçÎĽ ´¦®.~÷/˙"""®ůĚynM]],żŮ÷JüÍöíiůbvĽ˘ůź}ń iZ®Ť÷µč¶“1Z™É¤•ľ˝ĺćĽc[™É¤ÁňŔsĎĹ‘ż•÷řd| +šsĎ]rÜ?ýá#úe×ÔŐĹowěI«˙óÝ˙ŞčvjęęŠVE7|íţXË-qůŇĄQ»iSô=ńDśęë‹Ýך÷Ľž衼żŤÚM›ŇăúĎw˙«Ľŕ>óŤ÷uĽČ-7ÇOđĂyu͵´´DD¤sßɱřv÷™¸·aQŃ€.u \z–Ő­Ż-ňżő·ÜżÓ±'®˝ó31^uűbç_ĄŹÍmÓđßůn^xřé~öä]Ë-±dŐŞYN…ÁrDÄ©ľľxĺÉ'#"&„Äż‘óü Ăßľ'ž;;§u,µ›6ĄcԛͭŔ=ŢÓ“ö4Nz§Çö~+‹áăÇ'ËÉř–jĄqůŇĄéą;ŃŰ[t"ĆS}}ńßůn:.+Kµ¨číť,'ç=qeý'§<.•+Ćű>żuútŃŠđľ'žçž‹˝˝ńÎđŻćĺµŮŇŇźűÜçŇŰośŽřv÷™řŮ/Çü‡ €9Gĺ2\‚r«’'óöđpüĂwľ›Ú&ýw‡ŹźĐĘ!×ó}'joşéýÇÜ4«U¨I+Śb’¶ …VżlŻ<ůTÉö ?ýÁă“ÍÍédzS5đěł±˙¶ËÖŻ/ľ'ŽżĐ+ęëó*/_ş4·ÉŽíč#ʤ•׹rű#żřp¶äľűžx"­®^˝iSŃă|őɧŠ>ö­Ó§cřřń÷ŰcLý‹w~5ś>ÇkďüLüô‡ŹLX§ëß|}Ţ_źżů›ż‹/ŽÎ÷żÜHfk„Ë0%=~_ěü« ˇmŇÇřÄ$ˇiDÄđ±cńöđp|hÉ’X‘ÉĚj¸\ŘúlVf2i ;ôĘ+“®űć+Ż¤Ďąoť>]4°]Q_KV­Šeëם(oŮúőéż>ÉżuútĽŮ÷JÚß8‘TGD\ö‘ŹLzě§úú˘¦®.j6Ô•<‡ĄüęĉX˛reYc2đěsiXĂ—ľźhnŽź÷öĆŔłĎMúÁ|ÔĐĐű÷ďŹŃŃŃřµl€9H¸ — çżóťxóĺâŐ˝ůĘ+SšH­TEpá¶VÔ×—]ů[®S/÷Mű±łh.Yµ*~ăŽ;˘fC]¬(Ńz˘ĐeeŚŐŰżšŞŻĽţý4ţ:Ąí|äĘçĺďnřرč~đ[ŃđµűăCK–¤=›“ýT__Ľúä“ńęS?š7“ůM¦ˇˇ!jkkŁ˝˝=FGGý‡ €9G¸ — 7_î›´]ç.™śnÂŘ÷˝Ă'ŽÇŕłĎĹ•őźŚő·Ü’w©Iú¦#éé|1xöŮ8đ˝Q»iS¬ŢtSÚ$bĽ˙sÍűÍ˙eű—‹öežojkkcÇŽfć$á2ĚSS©FN&Ş+·mĹů´dŐŞÚWf2i°<|üxüŹG)čWŻ_?á±ĺTR_ö‘Ź}|Ň ăG÷µ^”cţÖéÓŃ÷ÄiĎîÚM›bE&µ›nŠ%+WĆĺK—ĆćG˙ŕ\h10ăßöööŚŐ«W愆ć—7űĆ{ŻČLއxÉŞUiîą´­ ą!ď•őźśtÝ$ /Ço|öÎô߇żľ3~ú­/Öëř͜Ы‹ôdN\ľtiÔÔMţřb÷çíż®nF+Ą§bÉŞU&xöŮxţˇ‡âŃů1đÜsﯷ2VN±•Č|PYY;vě/ů˱sçNŔś \†y& Ikęę& /?zóÍéżź}ö˘{Ż<ůdDD¬ľé¦’뵟˝sZákRŐ}˘··dk‡Ë—.-\żuútÚ΢v’c»ć3ź)şüÄ „Ř-hą‘kÉŞUń»ůńą˙)ľv˙yóŰ˙ď˙·˙ÇďÇ _úbéóňÄ“.˛*++ăĺ—_Žööö0 \ň„Ë0ĎüŹGI˙˝ńk÷ ?kęęâ7>sGDŚW:_Ś˝s˙ÇÇźÇĺK—Ćoíůó Ő´u·ŢźřĂ?śÖ¶“6 +ę닎ĎĺK—ĆĆn˙ěb IDATűżZ28~ńálDŚWďn~ŕO&¬wíťź‰ú–梏=Ő×—†Ó×Ţů™¨»őÖ˘űßüŔźL‹™V89á‰÷ż¨˝é¦’UÉëoůŕK‰Ü*ěůndd$xŕ8xđ`ĽôŇKqđŕAŔ%OĎeg†Ź‹çżóť¸á‹_ŚšşşŘúý˙?éěL[_ÔnÚ׾ßâíááč~đÁ‹ňyśęë‹#ßúłŘx˙W٦®.ţ×˙ĆĐ+ŻÄ©—ű˘fC]¬Čdâíáá>~<–¬\YÖ¶ź}.ť¨î·öüyĽřp6Ţ|őŐřČ•WĆ’U«â“ÍͱdŐĘx{x8­rľ|éŇxëôéŻ˙é‰kďüL¬Čdbë÷˙CüĽ·7Ţ:=5Ć+Ćs[¨űÁoĹďţĹżŹ-Yď˙j¬ľ©!ž}.†ŹËŰÄx÷L†˙ąýµ7Ţ˙ŐXËÍ1|üD<˙ĐCńbç_Eí¦Mńˇ%KâÓüIĽúäS1đ~Uű‡–,‰k?{g¬x?tţéIÇcľ;zôhěŰ·/oBż‘‘Ŕ%O¸ óĐOđĂxgřWńĎľř…¸|éҸáK_š°Î›}ŻD÷^”UˉdBąäy¬ČdŇpsřřń8üőťqĂ˝_*;\î{≸˛ţ“±ţ–[˘¦®.˙Ź?ť8†?|$^}ňÉřÝżü‹¸˛ľ> Z#"žčˇx{x8®˝ó3qůŇĄQ›Ó9Űä±…†Ź‹żiÝ›˙ôX˛reÔnÚ”÷řÜcxţˇ‡ftL“Ęé¤j»vÓ¦xëôéxţˇ‡ŇăúíŽ=qůŇĄqígďLżíăşT=ţř㪔ł„Ëp‰řyOOôfÇ˙=|âÄ9oŻď‰'bŕŮg㣷ܜ\ľ=<Ż<ńd^PZę8¦˛<"˘7Ű™®3•ĺSÝnî󸲾>–˝ßCúÍľľôř?rĺŠń1;v¬¬ńé~đ[ńóŢcő¦›ň*ŚŹżĐŻ>őTş˝ä9ób6Gy$j7mŠĘ+&ŰdNőőĹŁ˙ň˘îÖ[ăĘúOćµý8őr_ĽZ˘byřĉô&ű;yőɧâř =1RdťĂ_ß˝ĺćX‘Éć–,É›ĐńT__řĎçÝ?•ăšoFFFbßľ}ńŇK/ůŹs–p.Ç{zŇÉřfĘ[§OÇOđĂřé~xÎÇ1Ůń˝Í–µ|*Ű­©«KCĚ·NźŽgź-Ú&í#†Ź—Č÷=ńDZ]J±ç°˘ľ>í›üÖéÓE·‘ۇůť_ źÓ1ä>vlJă:Ů6Ďöw1ťż›ů¤XŚő5Ť]ß{ţŚ`Î.—¤îýR¬¨ŻŹçž‹®óő˘ëä·˘8Ő´7Ţ{o,«[Ż<ůdt?ř­łۉzśĐ9˘XŚ›7,[ŻY}'ß3@Ě) p)J' Ľé¦˘ýkęębăý_ŤńţËSiE1cÇÖ÷rDD¬ľé¦Xů~č\+3™ř§_¸'"Ćű/k#qé‰öööĽ`ůĂ‹"ľ´qaÜzÍBŔś¤r¸$ý¤ł3Ößrs|hÉ’hüÓâT__8×l¨‹š÷ű/ż=<GJTĎ–;˙*j7mŠË—.ŤßîŘ3é±u?ř “y‰+ŐăîĹâË*Š>fttT?f.yÂeŕ’ôÖéÓń7­ŰÓö5u„¶‰˝˝ńü·:ď•ÁĂÇŽĹß´nʆŻ}-–Ő­/ylÝ~«ě‰ą¸LÖc2ŃŢŢn¸¤ —KÖ©ľľřŃ}­±dŐŞX™ÉDĺŠ1râDďéą Áí©ľľxü¦®.V\ź‰Ë>˛$"Ć{?żůĘ+BĺKÜČČHěŰ·/ŻúřĂ‹"îľqaÔ-/ŢqŞT3pá-[l `:„ËŔ%ořرč»HĂÚS}}z*Ď1Óiqőqűu ăĹcďD¸,ľ¬"ţy­9`:„Ë0EÓm‘ŘĽnAl^g.]ćá2śĹtÚ`Ŕ\'\€IL· ĚuÂe(á\Ű`Ŕ\&\€Ú`ŔŮ — ÇÉ“'ăĐÎB¸ 9ş»»ó‚ĺ«–FÜŰp™€~Ű 92™L,^Ľ8˝ýĆéď=&Fß38C¸ đ˙·w÷ŃQÖ÷Ţď?BfHH¨™Ü;Ă ÔLRşĎ!D U1±ő„Ą]w3¸V%({c·˘ µ•nZ» ĹLö^[ń),Ľ!BŮ ať˝) ʉI4î&؉BA-çŹáşÉĚ$™$§÷k­®5™™ëş~óť‘&ź|óý~ěv»VŻ^­¤¤$óľ?7^Ňż~đŤţ÷WĚ—h'>>^«WŻÖřCóľ3çĄýŕřüď.Ö˘E‹ôłźý,`LĆ»Ë D¸ @‡śN'c2p€N0&€`×SşfѢEJNN–ŰíÖůóç%ůĆdÔśţ»L®¨†uxüźü»>jü»ÎM- Ú.ňWL€ˇŤp€c2^}őUť>žÂ,Âe®˛ŞŞ*™]ËçĎźWSSá2`@#\ŕ*zď˝÷´sçN t—¸ ęëëĺv»uňäIŠ”—čeˇş•'ĹI翦ż|u‰ÂezI¨nĺď\/e'×̉×icĹ× 0h.Đ öíۧť;węüůóć}“⤟¤Ţ ¸(ę|—čÓ§OËívëÓO? 
¸˙žÉ×);yxŘă:¤ęęj €!Żý÷ŇÂeş)T·ň˙=L?q×?ŚÖ᱇˘€€Ťp€µµµéwżű]ÄÝĘ·%Wí™o) FRĚu@—€Çă‘ŰíîV·ň̉×).jN¶üťBíÜ–x]§ßSč_—č‚¶¶6ÉăńÜÇ„aĘN®¨şöMđ­¶aşŐ6ś‚<Âe:Ş[yL”ôçp9âůł=ŔĐD¸ @mmm*..Ú|/Ňne#ÂeB¨ŞŞRQQ‘Nź>mŢ÷ťëĄ‡@·2á2AŠ‹‹µoßľ€űnIđmÚG·2>„ËřŮ·o_@°üťëĄź8Ż×­6Beüńw˝tŕoßHń^˘´Cç2ĐšďM¦ŔwqÄČžçťwJ’vîÜ©óçĎK’vW}«cůV?I˝^˙0šf$ÂeřikkÓÉ“'îóßČ*R§OźÖ§ź~p_RR’˘ŁŁLMZ­V>ŔtçťwĘétް°PŐŐŐ’¤żxĄŤß(;y¸fNä \†$©ŞŞJżűÝďĚ.˝ŢpčĐ!:t(čţgź}Vv»˝ßÖ"..NgÎśáC B‘üŰŻ•+WjçÎťzď˝÷$ůFdĽűń·úsăßő ózĹEQSŔĐE¸ Iҧź~Úi°<â;#:=OWžăńxúu¸ĽzőjŐ××óˇ™čččnýŰsß}÷ÉétĘív›ÝQsú’~}ŕkşCá2$ůţÜăńŚĹ¸aäő=v”$iTL”'Ťíô<‰“ĆŞĺËŻt®ĹTŐtN__řĆ|<))ÉśgÚ_EGG+99™“Ýn×ęŐ«Ăv1/™6\Q70‹0´ »téŇ%ĘÉ7sů7żůM@Ŕś6űfŤ›šŘ­óťř¸AGö|b~ť””¤§žzj@Í\€öęëëş%é;×K?q^Ż[mĚçżľ¤˙:yIű†:˝ĄşéďŞ9í‹·~üăëľűîŁ(Ŕ5B¸Ś˝0,ě˙VîÜąS˙ůź˙p˙­¶az0%|óĎ˙®w?ţ–W á2pm1(˘ŁŁőÔSO)))ÉĽďČžOtâă†.ź`ŔPř·rѢEzňÉ'gŢ˙çĆKZ»ďýą1ôďm?j X®¦řřxŠ\Ct.#¤îv0,Š˙^†ębţAŇ0Í›ŘĹĽ±âkŐžńÝž>}:ßř˝Čn·ËétRŕ"\FX‘Ěˆ˛ŞŞ*ýîwżÓůóçÍű⢤ťĂĺ÷ýˇ¸üä“O˛y(`@c,ŠdDÁ2€ˇ.99Yżüĺ/•’’bŢw漴éĐ·z÷ăouţk~— \č\F§:ë`&X€@ŹGn·;¨‹YňΝˀŹÎetŞŁf‚ećt:Cv1ź9OmťËč˛PĚţ– XEE…Š‹‹ş%:—ťËč˛PĚ‚e-==]«WŻÖäÉ“)`P!\FDBĚËбřřx­\ąR .TTT”’’’d·Ű) `@c,şĄ­­Mn·[’ärą–€!†p1Ćb"F¸ á2 b×S‚áôéÓÚąs§Nź>M1ô‰řřxÝwß}ŠŹŹ§€py đx<:tč…Чěv»îĽóN ‹1P´µµQülú :— ¸Ó§×Äx ×Ć™±ń:Ă( ĐáňtĂ…‹˛´¶Rׄ×jĄ c1ŁsŔ óíđá:%I:7š®Űž:gąRĂęęj˝÷Ţ{e€›>}şâuč!ÂeÂĹ‘#Ő3Z­V«Zbc)ČURUUĄŞŞ* 1ŢǧžzŠBz„pŔ€öíđájLLÔ_n¤@}úé§Đc„ˬS˙#Q§ľű]ýýúŕm˘ÇŘ4ü†‘ŠIśHˇ€ËN+Ł€^C¸ `ŔůvřpťLJŇٱ3c­7Ž×Ť“Óc›¨‘Fcí.zá2€ĺŰáĂUý˝Éú[t´yßQ1rdĚSŚŤ.e€k…pŔ€*X¶Můż4ń˙Ľ—â\c„ËŚĆË“fĚŐŤŽT Đ®Ł‚“IIj‰˝2G™` o.č÷ÎGE©)áFó뤔L‚e€>F¸  ßűßö$óöűŮť?¤(}Śp@żÖj±¨Őj5żžřŹlŢĐ.č׾ô‡ńÝINŤ´ÄR€~€p@żÖj±·‡Đ.č·Zbcő÷믗$ŤC×2@?B¸  ßj‹Š2oÇŮo¦ ýá2€~ë|ô•p9&ń& ĐŹ.č·ľ>ÜĽ=|ÄH ĐŹ\O 0Ô××kßľ};v¬~üăS ‡—ô[ç¬VóvŚm"ôČľ}űtčĐ!IRSS“\.Ez€±šššĚۇ’Űí¦(@.`H"`z†pC3Đ}„Ër˘ÇŘĚŰĚ@÷.`Č™đŹŮúî$§ů539Âe IŽŚyĚ@.`Č"`şŹpC3Đ=„ËňČ."`"u=%€ľs˘ú¸ÚZż űřX›]cíWĺš‘ś»©ˇ^MŤőжŚÖ¸É·÷W­$MIMż¦ëió¶čDÍÇA뀞rdĚ“$ýµÖ#É0K’Ëĺ˘8@;„ËЇ6ţ|±šę;}Ţě…ŹhÎCO)ÚÓăkľ^𜪎VhÉ3ĘH\ÔĄcöoŃŢâ­šóĐJ3Ě=Q}\ë–ÍS”e´^}˙ÓkĽž­ÚńÚzÍČ^¤‡ź-ŕ W0]C¸ }¤ÍŰbËvÇÔÁq›·Eő5kOń]Ň%=¸üĹ_·ŞÝĆ'Ş?–$Ť›<5Ä}·ôĘzüĎÝůzŽG| D‚€čá2ô#ś•¤Šö…}Ţží[ôĆ+ĎiońVÍ^řHŹĆdˇl”etDçą_ ’§¤¦kŐĆwzm=‘„Ô9KVjöÂG—\UĚ@ÇŘĐú1Ż8ą“âŮ‹1owe„FGşÓm\yÄ·ÎöôŘD»¦¤Ą÷0\î^÷ó¸É·hJZzŻŚ €Ž°ÉťËĐG*»1˘­µĹĽ˝ăµő’¤»ď_˛¶y[´÷Í­’¤9­ĽrÍ#WFb´y[´ăµßčDÍqŤsܢäÔéJ»#;čšWFPŔĆőýĎo\űŕîbU­P[k‹ĆÚěJNMWĆ˝‹ÂžŰŃQy¤BwoWSc˝Ć9nŃÝ —…×'ŞŹëč‡ĄŠ·ŮÎéżž6o‹Ž|XŞŁ–Ş­µEi·gkFö°atĺ‘ ý°T'jŽk¬Í®».ոɷ¨ňH…Ş<Jv¦kJZú ˙Lž>}ZĹĹĹňx<ü řˇŤpúH}M׺vŤ@Ř˙ą'ŞŹ«d[čp×÷řÇ*ٶ^vÇԀǍk65Ôëźî˙Gµy[Ěkě)ޢ٠ŃË_:—8ŁŮ¸~”etPx˝ń_›ç5”ďÚ®˝Ĺ[µjăŰoŐQ_@36Ń®uŹĎ x­ĆšÖş˙PŁ#”jÇkëu÷ÂĄ’™ŻÇx˝©·gm”Xy¤BG>Ü­Uß XW›·E˙eqŔuŤő.y¦@'ŞŹkońV-yfhlčńx–0`ŚĹ€>ĐÔPo°u.·y[ôĆ+ĎIňŤĎ0şxŤŔ7ÜHŤĘ3’}Çů:…îŢ®»ď_ŞÍĄU*<بžđĘ{Š·­F í.űw@ű3‚ĺžxA…Ux°QkÝ”Ý1ŐH_î.nżž7^yNçľjŃăż*TáÁFm.­2_[ů®íťľ6ă<ŁFÇhÝăó•ěL×Z÷Ux°Q«6ľŁ(ËhU©xmmŢ­{|ľ*ŹT(95]˙úÖ¬wŰKËuôĂŇNߣÁ¤­­Ť˙81$ŚŁŰÄŹcDÎečF*…î\nó¶¨ŇsH;¶­7ź›ă×!|eTEčĐ3ÔČ ˙`uÎC+•łäĘůf/zD'Ş?ÖÁÝŰUľk»9˘ÍŰňZˇf%ź¨>®6o‹’SÓćDŹ›|‹r–¬ÔĆź/ÖŃKőŕňÖ“ěś®'^v›_G[c4%5]UG+6>”ü;ľ×Sy¤BKž)—1%-]ă&ßbnJh(yÍWŰäÔt=˝éť€ő>˝é­\đłű9Ň™ĐARJ¦ěÎň+ĐĚŔ„ËĐ*ý‚ÎĹ3l>7Ę2Z.1`ćo¨QţĚnă´ŔQ’ołˆ´;˛ĚyÇíŻołŚłŐÍl¨:Zˇň]ŰŢ´;˛FR뉲ŚÖĂ!ĆNóĄýŻŘń}KP=ĂÍv>ݸbSC˝öűfR‡şv´5Ć ¤;ŰpŔĐÓćşş:}ńĹť>oüřńš0aÂU[‡ÇăQKK‹bbbät:#>ţŔ!×ŮÓó^«uu„ËĐÚwă¶ołkl˘]i·g)íŽě MíŞÂŚ˝Â°Ć5g/\ňšŃ–ŕÍîÂm:Ş›yÜä[”z{–Ž~XŞm/-×Ţâ­ş{áR3ěmż!ž±ž´ŰłCn´g<î˙ÚýCäPőőÚÚĽ-f˛±†#ě–$ŮSj{ĺőLUŐŃŠ°>€ˇ­Żf·Ű­µk×vůů999Zľ|ą233{uyyy:pŕ€fÎś©˛˛˛Ź7ÖóüóĎkÍš5˝vŢkµÎˇŽpú€†>ţ«BĄÝ‘ѱĆ8‰(ËčÁhŘÖă»?ÜőšÚu÷Jţ!ň-A×oßÍ,IOĽěVɶőÚSĽE'ŞŹkŰK˵ăµőzŕ‰‚®kt?§Ý‘ŐaŤÚo$îľpŻÍ¨‡Ý15辎jeôÇT>°BH#2JJJTRR˘ÂÂBFx ×.Ŕ5ÖŮĽĺ®îX#üőEý»wĂuę†:.äf~aş™ 9KVjöÂĄÚSĽU{Š·¨©ˇ^ľX<ń‚9‹Ů–s¨Î`˙‘ţë řv¶ąa¨ůĐm­_uX ˙×>ç-čşţ0oذˇĂQn·[EEE’¤Ĺ‹Ëétöʨ‰«ÉX_YçĚ™3%‰‘í.ş,Ĺ•kŢ>ć.ęŇ16§S ΔŽé•ođ˛ł5*áFť;őĄjvďz<ÎáPňüůŠ›ěPśĂˇSŹ>{Ź$ux\bILÔ¤{fK’NyŽ©ŃăáC „˙ăŽÂÍÎŽʶy[t´ĽTRč‘ářgś7Ô8 ßą‚CáĘŁjkýJSśÓmŤQ´5Ć ™_/xNwo׎Âßár¸YÎí×Ű>Ř 5¤ŁÚX›ďŕ0Üx}í•ďÚ®6oKŘîpřůŁŹf§ÓŮḋĚĚL9ťN­X±B’”źź/·ŰÝŻkšźźßŻÖÓŁ9Łë( «nsąĚ˙M[¶¬KÇÜčtšÇô¶‘V«R\ą!Ď}SÖ=şÍĺŇMY÷=çpč® 
t˝Ą×ŁoppŇm[±;”J<¶reÄÝŹÝúL&¤/^ě7` KDQNxď.ÜżRŚóŇX "â1‚(Ś$YŢ» OH‘źłtĘí Ľlkk+îÜąÔÔÔ)Żç;˘Ŕb±„|=˘xŢߢEUc#ÎvuMxy®XŚJ%v(Č•H"ö~ÜpŢ|ůůů¨ŞŞň»ś,EµÄĄË‘‘“ĄRĘbŹDa$HJAâŇĺçÉ _6u@*•J‘›› “É`z]˛ůůůŢÚ ß"aDÜßbĎ™Ě=6FfÇ'đWJDDDDDáŕŔNgq/ߏýqŽ,QlJ ńÜúő8SZŠŁMĚ„±;dc^II :®ß‰ $H˛X"â1‚(Ś***đŃ•>|Üig10»± ˝^ŤFĂýŤ(F¬•JńÜúő1ŔŽÇ@6ĆÉĺr ĄHĽşĹ "#ÂL©TâËA {­,f>¶Ŕ·CÖs]îoDDDŃď€ZŃłaç Y""""" ›ÂÂBo—l¨¬T*ŢE‹Đßߏţţ~ŤĆ€Ő‹‰(úLĆ~~ó&ÎvuálWúŽ€ëlÉÎĆ™,jÂ\˛DDDDD63[ P(ĐÚÚ d‰bÔ[^Öé`˛Ů&ި« o Ü‹=ŻRaKvvDß/.ęEDDDDDaă[ŕęâ^ľ¬aôEQ,jnnFuu5Ţđ)ËAq˘Ďá@Ů©S¨jlś<ŚçlWvŐ×ăő¶¶ľ d‰(¬|Ç„Č* ďiŁŃČ"Q̲X,0 ¸€S±)^쪯ǩÎÎ /_+•"W,žđň[ZđbssÄŢ?Ž, ˘¨ć8_‡3ú$\<•ŚĘĘJ„xŚ §˝C†Ó8vYüU+PRRňuÇŹ-°X,JĄ“^GéłňňD {q#ZXŰ·oGqq1Š‹‹‘á§Čőbs3>·XüÎۡP`‡BtAźĂł]]x˝­ g»şĽçżŢŢŽÇV®ŚČńě%˘č~2j5Ârý ?ŞHDĄR‰}űöˇ±±{÷îĹÖ­[˝—566z«©©a×l3Ůlčs8f~}»=˘ďY"""""Šł[ŔY"˘č˘ŃhP]]Ť“'O˘˘˘Â;ÚŔfłáČ‘#ě–ŤC[d2ďéSťť3ţ>ľ×];Ĺ"ˇáŔ@–"ĆtÇ( ď鎎( eggc÷îݨ¨¨ŔîÝ»Y8ć»×ËçĎϨKö¬ŮŚ·|÷śÍâ`ó…,EŚńc ¦ę’MMMĹ]wÝĺý?»d‰(Ö˘˘˘{lŠÁűg6›QWW‡íŰ·cĎž=8rä÷2q„/ĚDsĎ7<5ŮlŘU_“ÍňőOuv˘ěÔ)ď˙sĹblÉÎŽ¸ű™Ä_5E’ÂÂB´¶¶ZZZ Ńh&Ý^.—ă믿 ż1DDŃN*•B*•"%†î“ÍfCSSŃŘŘčw™X,†FŁAii)˛#0HŁů•+‘ŕąőëńz{;ŕs‹Oüçb‡R‰ÇV¬0\=Őى· †€1Ôę°ÜŹÝúL&¤/^đÜ„,E5á˝»p˙J1ĚKc1Ǣ0JdAxď.9 ×hN°…#·®!ĺgĘÎÇů:śŃ'áâ©dTVV˛ qFz:0Řv"ř…Ă w4Ŕeë†pí,Ź4†®|á+N¸Ď ~rÉůŰÄ7BćśÓŢŤ!Ăi»,DţŞ“§Ó1~l^Żź°ÓG©TzYŁŃÉZ˛ÄýŤh!Íu [VVĆ@v”J%<čiP__ :{˝­ /Žľ!;]}¨jj‚ÉnÇóúřa K4Kßń c].÷_ď ŔĎßhs) 6×OF­FX¬?”H‘Č54ŕîŇó=/Č1bä«vŚdć#13źEă1‚fóű¶Ýđ c].—ßĺž}n¨ŁÁ=6D˛ŚE›ăcžÓj„Ń ¤$ĎíÚÁ=ô---˛ …Â{Ú`0@µöQţbűŵZ µZŤĘĘJ466˘®®ÎŰeí kłłł±k×.ěŢ˝;ân˙[z}@›+c‡Réž+ú]Ö78łf3NuvÂd·{ĎY§ĂZ©Ź­\q÷‘,Ń,_ř9o~á÷ÂĎóbo|ŕâ´1ŇÓÁŔ…(Ž }Ńŕ÷‘éńÇ_ßÁ˘Ěź˛hDł0ŘŃŕ·żůîk.—Ëoşň!DżË˘E‰ÂÂBo {áÂ… ·ó]đËd2=UDD ëÜąs,B 6oÖ3cÖl6Ł©©)âŮ>‡Ă/ŚM q@­ž2T}lĺJüް0 łöĹ––°˛)ä.„ąąA$e ăŽ;†ŽËWá°"YQ„q‹2‡śVŁß˙Ç,ˇ¬˝›,ńOÇ[`÷Ľ/żŔhŘ×@/G›Ä¸ęęjXnÁq{źš®Iţ.Źß˙Ć˙ §Č&—Ëq×]wá믿F?.\¸€‚‚‚€íRSS‘›› “ÉřóŃc8™űĹ˝ěěläää ŁŁ#ŕyQ$y˝˝ÝofěŃâb¬ÍČůúĎmŘ€µR)vŤŽk0ŮlxKŻ_đy˛Ë<# kçN,Z˝:ŕr˛1Îh4ÂŘůĄűEĘĐ 2Çśý˝ÓÚ~äÖ5$ç}“…##â„ď8“`Ozď©ÎNďéçUŞi…±[˛ł±CˇŔ[ŁĎ˝?·DŢ3˛Dł HN™Ööě>$Š3I˘ŔUŢ';¦$‰X3˘™ţM1XőëšeU¦3¶ eôŁŠÁ>ť@DDóKŻ×ăŐW_Ecc#÷BS~áź^ŻGGG¶nÝ ‰D¢Í1›Í†úúzÔ××CŻ×ű]&“É ŃhPZZ‘µ÷ Oź[ż~Ć߇,Q ó XCyÂź°/üâ‰@śĺýuHÇ.0D4óý-9HIú&Üçüć8óMҨęŘß…˝Ć-,ťN‡ňňrżóôz=ĘËËqđŕAo(ŰÔÔ„ÚÚZH$8p ";5ŁQWWŞŞŞĽa¸Ż­[·BŁŃ@­VGĹ}É‹‘&šyĂJn„ý\Ž‘h6;PşÜýâţ‹†řţë}!$Bâ7ÖłhDq$Y~_Đó=‹ ů†C‰2f+I¶Á{Ú3Äw,o8—Ä}.*ů°uÉ[8Fmm­÷´JĄÂÖ­[˝˙ßżż÷´çŁó6› UUU0›Í,Ţ ™ÍfÔŹÎKő,Üĺ!‹QVV†“'O˘şş:jÂX~sdgÂ4n Ŕz>>_YY‰B,ĂfłůąZ­ëëë±gĎlßľZ­Öďr•J…˝{÷˘±±eeeČÎÎŽšű–+p˛ł U}Çl‰ŔűĎ@–h–—.‡pĂSîY‘Á$‰”÷M$fň…Q<®yÂĘ#gA´á)ľaC4GDź‚ }âIAş˘ O±PQĘ3¶€wlA0Ę^I™Ć/őĚ(ő=&űŽ*đP©T¨®®8ź&f6›±˙~lßľűöíó›+‹QZZŠ“'OâŕÁ~żhň܆±O<˝<đOWźĂáwÝ>ăŚ"gČÍÄĚ|¤HžĹеspÚ»á˛!H—#Aś…$ŮzÎ…$Šc‚ä6~#=ąŮ§­®áR– )3ź]zDs˝ĎĄ,AĘćR wµa¤§N{7÷Ü÷ÄĚ|ż±ť ü÷ 6GÖwlďŘ "˘hTXX…B55HŹ‚Ű¬SĄRA§ÓyGřž¸;>ÍfsTur†C}}}@7¬Bˇ@ii)ÔjuL,¶CˇŔëmm0ŮíxË`Ŕ™ ;¦ńFkźĂ]őőޑϫT9O–,Ńľ*ŠX" *13źťňD ů$W¶ák ľěť;wššę·Ťo Ë…˝(ÚIĄRHĄRDúç©<áŞÝn¸Ěłŕ˘§‹Ö—D"ÍfCWWىĹb¨Őj”––ĆܧBŇD"Ô>öĘNť‚ÉnGUSN]˝Šç7oĆÚŚŚ Ż×çpŕÔŐ«x±ąŮĆîP(đ|ŚĐ  ĎdBúâĹ3îČQTŢ» ÷ŻăÁĽ4xŚ ŁI„÷îÂSRäç,ť·ź#—Ë‘›› “Éä[ŕ;[Ö^ř.śč´w#Ágľ0÷7˘ůˇP( ×ëş]e2 ««+ŕ:¶_|)’Čd2TTT@ŁŃÄD7l0oéő0Ůíx,/Ż·µNuvâTg'r%äŠĹX›‘4ˇpÖlFßŕ ßĚX“͆]ă:ŠůŐCMöÎÄ ‡ŕÍ7‘źźŹŞŞ*żËČQTK\ş9éP*Ą,ńAF‚¤$.]yž ňe©óúł qüřqČŽç´1%îoD ˇ´´ű÷ďÇ«ŻľŠ˝{÷zĎW*•‹Ĺ0›Í°ŮlŢ0Ńl6{·Ź.ćD‹Öą°Óń–Á€łA‚{Ŕ°šl¶ //Ôí<µ ‰,E•‚‚o ŰÚÚtlď¨ç­k€l= GD4Ď4ŤßśÓŠŠ ořŞR©ĐÔÔťNµZ ›Í†ýű÷p‡±\qjZ­őőősöý *++YŘ0` ăJJJĐqýN\° A®"â1‚(ś***đŃ•>|Üig1fA*•zÇÁ»d˙öo˙oľů&ŔiżÁ˘-˝^ĄR ťN­V ­VëíŽőŚ+¨©©Á«Żľ ˝^~Z­fńBĐŐŐťNÓ÷q­tá?Ůć°ČĆ8ą\ގ‰WłDÄcQ)•J|9hAbŻ•Ĺ šĄ©Ć<účŁŢ@ÖeďaÁŔž={Îó ^˙1»4§C&“yO› žĹÖ"ÉŻ¦C+ČQÔ elAZĆ7Đwó+ŔČ­kH\şś…#Ša­—{°iU& B Ĺb1T*UL/P5×4M\Ě‘Ť d‰(ę„2¶@šťç dť dç…Ój 8/!]ÎÂĐĽ»zŁo4\§/áj·-ŕňM«2đô¶ŐŘłm5ŇĹ"lěÝ»—!QČQTšjlAzfŽ÷´ÓŢ÷őrÚ»a‡ű´­ńśľ×ĐŔ„ŰÎĎ+QÄţóëÉ)H,s˙'Q46ß>ȶż¬vĘ˙ĺ4´gŻLş]ëĺ›h˝|?{í ž)ZŤ˙ç˙řVÔłÍÍÍhiiÁ€ŤŁ_‘ŠaěÂ1›Íčęę‚L&Cvv6 eČQTšjlÁ’L™÷t°NÎXâ´á€ëvŹ_Ŕ:ďÁęL ;‚ţNFz:¦x;Îz\ÁâL÷ivćĆĽÖË=řÎ 
‚őöŘczÉb!6će6ćeŕÓ+7´ŹÍ)ýăéKxżí:Žţň;Q9ŇŔb±Ŕ`0ŘçOz˝/ľř˘ßl^ŤFŠŠ żŃUUUP«Ő(..fŃ"Y"ŠjŽóu8ŁOÂĹSÉODŔ¤, % ’,RŇG˙]Őż÷HŮßÂÍjwř…±ó2đăíˇypUĐÎW«ÝíG—ń뺏q­Ű†«Ý6ězé]´Ľ\Â󨼼|Ć×=xđ 8łŮŚýčG°ŮüÇthµZÍfo őz=ŃŘŘ­V‹DÍśŢ]Z-Îvu-čĎę_YY‰ĆĆFo§¬Z­Žřű¶Cˇöř€łf3úńąĹ˙sqĎ­_Ź4ŃÔoľäÎCP˝ Ŕ3˛vîĢի.g ăŚF#Śť_şźřŤ[9•Ǣ…ĺŰBDs§  ŔČ677ٍ¨(čţćí” §˝#]méů"¤™ŻwIÜÝ®ň,w÷ëX7©€żđIČGß[v×ËżVz#`ęq‡¶úk® c\ö Ű{Ł» ÍĐ&Ę6đŤë0Ňž˝ Ŕ=7vŲ™ŤI‹đíő9¨˙č ´g/3 3ĄR‰ęęjTVV˘©© ?űŮĎpňäI&D …Âď˙2™{Ë`##Ôj5ęęę`0˘#U*§ĄŃ€şĎáŔ‹--xkô9ŔŮ®.-.)”]h d‰(ŞůŽ-0™L°Śë( pôFn]CâŇ…Y§|¤§ĂýułvLşmn& Č aĄi ^çšRîl[úÜ!­ń† S𑞀vب’DHĚČGb¦ű‹NŰ÷>­Ů’7«ďłiUę?şÂ‚F•J…¦¦&Ífżů§śX,ŕ~“ß·VJźÓfłůŤ&§1i"¨ŐH ńz{;>·Xđ˛N‡_Ťź/ČQT 6¶ŔWâŇĺůŞŕşÝĚc ëčðńc wµOÂćç¸ĂW…ÜwÎ+CŘ…$M ×Xç®»gˇ1ŃÝAŰq}ÜďcŘ‘ŻÚ1ňU;)Ky’ä÷C% ĹE3Ďb^KĎ®ËíŰës|ŚÚÍ,j„đý˝§Ë“&¦V«ŃÔÔ„ĆĆĆ€đZĄRA§Ó„µz˝>îęô«ÂBśęě„ÉnÇëííxnÆyK0 d‰(ęŤ[ŕ+!m™7uöÝ—ź?ňU;†®}<á6—öÂc_ľsh]˝6ę0lÔ!!]ޤě HüĆzqž-Y, zţ_]'Č@ IDATýň>h7ăŰëłńß/=ĹBE‰D±XŚěěldOsnh<Ňh4¨­­E}}=Š‹‹ý:c t:ôz˝7µŮlčččËZíP*ńňč‡SťťxnÆş} d‰(ęŤ[ŕK°8Ó{Úw§Ůr `¤ë3 Ďť {—Ä…‚{€ÂőČłŔFĺăĘ »gĎ6·»pá řÍźuZŤ´!¸ü!’ä÷!Q¶Ž‹Í“kݶY]˙öë€ĺY\i>R”••ˇ´´6›ŤĹŃľ}űP^^ŽýčG¨¬¬ÄćÍ›‘ťť •J…şş:čt:ěŢ˝fł5550›Ýáń6b‹OÇ5Y"""""˘y0~l/ß™±®^¸†f w6cčÚÇc ‰ÜŢC륜!l,‘g%Ű(Ů6Ö5Űňü[C§1tĺ ’óľ…$9gaΕĺY\붡őňÍY}Ďő7će°¨ó$آRˇb‡ěÔôz=ęęę ‘H`łŮ°oßľ€mšššpß}÷ůť'“É8ź7Â0%""""˘ŕ;¶ŔĺrA  DKráęuwÎ:mÝ3^ŘkäÖ5 uśMp—ÄÍ7(¸Ű3–bú±6Ú9«)šŰÓç}F ;0ÔqĂĆsH^óť[D.–=Ľ>o4\BýGW`µ;.žţ,Ů÷Ű®{ôzxCNTÝ˙ÂÂB( ܨ©Az„ßÖňňň_÷Üąs|°OÁ3?v:d28wµ2Ex×5Y"""""Š ľc |ĂXHdaÄČŢş6íĚ5<€á+bŘčßý•źăÂŁ÷ Pp»aă‘4ÍĘj ćĎí‡.ď8×@/?9Š$ą ÉůE,Ö,h¶äፆK€ßi[ńBéÓşţŐ}ř‡?Ŕ=‡öém«Łëq&•B*•‚ďőŕX …ßü؉( ¨T*H$ń7¦ă-Á{:M(\đź? @źÉ„ôĹ‹!—Ëý.g KDQMxď.ÜżRŚó¸ş-ńAN ’,ďÝ…§ ¤ČĎY–ŰššŠ‡z---—%޵#¦óÜ]®ÉÓřľ®á8Î×ůuĹz:b ×1%·Âu@á:Nź´ÍcłĂFFn]hăß@’3űŰBŇlYĺ[𻓟âém«±bŮÔµĽzŁo4\ÂďN~ ëm÷/ä'Ű7ͨÖBsđŕÁ¶ÓétĐjµčęęÂîÝ»QQQÁâ… ¸¸eee,Ä$úĽŘŇ‚ł]]Ţó¶„aĆ ‡ŕÍ7‘źźŹŞŞ*żËČQTK\ş9éP*Ą,ńAF‚¤$.]yž ňe©a»AŮń2ďi×ížżźÓŢ Çů:żY±ŰîuAS(ŕh Şhł{śÁ±Zżí–µ÷ŔńéB´ątNüŠ”ým!˝Pz?Ę_i€ő¶kţĎ?†tťßžlĹď´źz˙˙ô¶ŐÓé uN©JĄBii)ĘËËqäČlŢĽjµšśB<ĚŮ}K݇ÉnźŃu?·XpÖlFßŕ ßů;Š»ź d‰(f`ѢEčďď÷;_’A’®a0ě€ÓŢŤqÖ¤ßË5<€ÁĎ˙ËĆ.Ď>Ž' )IÓ€ż{R€ _żĎÝ-ë˛÷ŔqľnÎBŮxóLŃĽqú>h7‡|ť‡7äŕwÚO±şŇ‡Ź;í,Ń<ňí’MČČ÷ž/H—Ăuó î…˝¦ d?=áS°HTîäüsIÓy,Ţü_ Ôw˙ßeďÁĐĹw!Üđ‹3Ç~ůJ^zgÂË7ćeřý_łeZ^މM«2YĽĺé¨5›Í0›ÍqŃ:µµµ¨­­ťŃu<rs¬xnýz<·aCDŢ6˛1N.—c@(EâŐĹ,ńAfJĄ_ZŘke1ć‘o ë˛w{ĎOgÁé dżdë'ü#·®Ái5`Kł<öËď?zoô±ŐÓ‘›Hôył€B“.áż_š^Í06˛Ůl6ďé®®.˛4kąb1¶dgc‡B–ٱˇb KDDDDD1Ĺwlk ×;ž aér łĽaëD†Ż|č=]´™a,ÍNá:ŔŇ ÔŹŽ7ľvŽ,€ššďiEÎůŚ4*•jŇE˝Ěf3şşşĽ˙@ii)ÔjuÔÔ÷¨FżK˛DDDDDs|»d‡ÍźB¨xÔo<ŹgA0®áżŔ¶h3ëIsđĽg,uZŤp ôA’ĆÂPL őăôfł{&°Bˇ€D"a§ R©B; ÓéP]]Ťşş:ŘívěÝ»—Ś d‰(ćř˛Î›_ŠG!HJ@śé cGn]CâŇĺ×uÚĆĆä縚ÂEĽhöäYîńýŽŃÇYż‰ dCňWż<1­ĹĽ|Ý9ů㨿˙ÍÍÍhiiÁ€ŤŁ_‘j&óMĹb1ĂÂy R©Ľ‹¦iµZlŢĽš8é>Ť d‰ćk C—?t,Îj„@ś…q’ä*$H–±@Dqn¤§#_µĂië†kxŔýŃŮĚ|$}c=É\i™h® Ďa¤§ÎŃůˇ â,$Ę6 i’™ˇ[ ,LÁĐŕ€˙Řń2ŚŚ˛®Ű=@°@öëËc§Á0–ćŽ$Ő…~Çčcjđ B!±X,0 €ĺ±¶OH$8|ř0gÇÎßnZĎŠ d‰ćâE_W†:€a‡÷<—˝#önŚ|ŐŽDů}ćocˇćă|Îč“pńT2*++YŠ8®ˇ ]z#=ţ/ö­F8­FŚtµC¸ć;|ă†Çš«}n ŽOOř-ääżĎµA¸ö R–°XsĚiďĆá4Ž]"Ő ”””„ý6}cŐZ/ťw?_óŚ-Xş#_µFľľŠ¤ÜŔŹ~ş·˝§oőą†˛4Gz|Ött:ěHڎým>=]´oČ™r»ÖË7Q˙Ń÷u¶­ĆÓE«ů [`çÎť i;˝^Źşş:Ô×ף¦¦`ńćAccŁ÷´X,fA"Y˘Yąu Cßť|ă9 §¤!I~ 6×/ţ¬FX¬€…Ą 5xńďŠŢÁ¸ěÝp|r)•łS–ÇšÁÂŘńŹ Ç§'ňŔł,Ös ąç®­@JrBDÜ&™O ë[ĘYp±÷ô×6î ©`ءKď˛`Dł4tĺĂ a¬ßţ÷!CW>dÁâá…誵€Č=ŁÓ5Đ‹‘›Hű˛˝p ôy•äß·rú]8Á^ěŤAčśä#”D ýŃü u,ďřŽŠ#Łc 0ěpŹ-HqäÖµIŻ*ýTą'”˝đËIˇ»đđo;î’Y“…`µ;đA»™…p6› 555Ţ˙+ eČd2¨T*.ęaŘ!K4 ‚$ŃÔŰřtč°óŤ(ÎŽâ¬)»đ|ŹA4s ’eA÷+_~ťłI"R–°pq"1óŚÜ`Gş;¬Ř6ö¸픝HÁ=Ŕ9=Đďp‡˛˙ö' h3Püú˘‰Yú€ăöĚŤuwr3apą‹ő™o˙đďgĽ§WdqnćB*//i;»Ý˝^ďýżJĄâŚÓ”••…´ř™^ŻGSSŽ9»ÝŽŠŠ Ž0 d‰fóâ/]>ń ˝`Ű3l!ŠŻc„$ #ˇ~,:IA2_ŮÍjźË¸Ço!˝éü §Ń“˝q,˝ŮáÚ'Ü]ł÷ §˝{ÂçiK/ý¨>zÜçť>ďÚ4…ŔCëX_ňwú< mvˇß1öş 7ř»'˙x‡ő™‰?žľkݶ)·»ÖmĂűm×qutŰŤyX±,Ť\@3™oŞP(pŕŔo)•J(•J( TUUáĹ_ÄáÇYHznÂÍâ…źdsU1ą˙茟é· Hş‰™ů,QIľg›{±.źŃžăÂř7p„kž`Áf»Ďĺ}ź@v˛7ą Wś=ggŤ°ž±’,8=¬Ő8éç©)@ĺNwČÖđ‰űqdé~˙Đܩܝ´ßZ>ţGç‚©GOW,l»×…’mhŢ8}iÚ#–,âŕO·±x LĄR…Ľ­Bˇ€Bˇ€FŁaáć‰Z­†Bˇ€^ŻGcc#Ôj5‹!ČÍÁ‹?§ŐčýXrĐ~I"6<ĹbĹAr 
„kžŔ`ۉ€ă‚ß8ŮzľaC4$Ëśż C ű™ßßîüm~#(>Ś[x× oGµłďĆ”×OMJ¶ Püţ]ľ¶ą_“űKšćîÝt7GÄ“;@Ă'î ÖÝ;vÜąKâÂłß@)g»6će`c^^(˝źÝ±apđŕA!ÂČd2  ˛„,Ń, ’S şw†®|čí”ő{qq„Š"~™(n€|î˙>/ľ8O6Iáš'ĆÍĺ“[ů}ł0xń` Ď˙”4÷>Ç™îńůŘ7¶ Q¶Ţ{™Ó~#äż)@Űěţhz˙č‡ <ł‹D.Ü <´^%'cĬ _:\hůܶޅ®‹Dîpľh3Řąňß/ĹwsKaa! nÔÔ ťš&»ÝÎ"Dâó–€höÉ)*ŠŕĘű&śönďÇŢ$Y\0„ Y†”ž…ÓvN{70ě€@śĹPhž$.]ŽE…?Űç€ŃżË슍ëcń¸±ń'cďkx‚¤Đß@wn@ËçŔ˙śëíwĐňąű|iš{”ÁCë9—zĆn÷X‚ _¸xßpwÄ® h3»¤inIĄRHĄRđaEÓUWWçťë+‹Y4 @źÉ„ôĹ‹!—űżKË@–h ’S¸t9C–$Ľwî_)Ćyü8EA YĆ@Çâ>Ăő΂đŢ]xŞ@ŠüśĄyÇŹ-3᲻WęrÚş§ý.5ĹĘm ů3ŕ´nlá/ŔÚť>ďţ’¦Š\ ňś7E<ť°“`4„ ”›éž#\¸NŔým}Đ~˝·h˝|+˛Ň°b™Vf ],â—b–V«E}}}HŰŽ_`ŤłzÖ ‡ŕÍ7‘źźŹŞŞ*żËČQTK\ş9éP*Ą,ńAF‚$÷Óň<äËR#ň6&/`,ýŞ‰Ů›0â do]›Ő›ę…ëÜ_–>wÇlËçď8Ŕ}~ËçîîYPäĘĺ€Bäf°Ł2ÜL7Đ_sĎ}tl{—Ä…‚{€Gď@šĆýmˇXíüĂżźöěXo;‚nóđ†ür×ýxxCÔsşşş‚Ö©ĹbTVVB"‘°€„,ĹAJš_W,\.ďeÎńsľgHšć^ü«dŰhwĺčW˙¸ěČłGn¦ Ją ą{Ľ”ŤýóÎŇç_ŤÝ€Ţč‚©gň×E"÷ ÷çĂ.´ÖË=ŘőŇ»¸Úm›t»÷Ű®ăý¶ë8řÓmx¦h G1E&“AĄRMąťŮlFWW`ßľ}\Ě+1Ťq%%%č¸~ '.X áŕ*"â1‚(ś***đŃ•>|ÜÉŵż%eoÄá4Ŕĺű şÓjśóŰŕ ďwč×ÜîÂ…/ŕť7ëËÔ#€©Ç=Ţq _&€<ËÝQKłc0ąĆÁFX=ť°…ëśFV»?ú—oűíőŮxşh5VdŤ˝sŃ{ŰíŮ+Đ~t˝·QţJ6će`ÓŞLb†FŁ yô€N§Cuu5ŞŞŞpđŕÁ‚\Z8 dcś\.Ç€PŠÄ«‹Y "â1‚(Ě”J%ľ´ ±×Ęb…iKĚČ÷˛ÎŻ;$!0< ;ŕč e~ZSĺYcťł–ľŃ®Ěkî`Đwî¬Çř°Hä‚<Ó ¦¦¸ÇHÓŘMëËŘ ôş;_ď Ś°=.ô;|׉»[s3ÝA¸rą`´S™ť°‘ŕŤ†Kh˝|&í|ŐlY…ÖËńř oŁ÷ö ţáß?Ä{ż~’¤¸¤R©°wď^ěŮłUUU8yň$ÇD˛DDDDD7Ć$/v˛Fn]C’lýĽßOęůŘűťŔŘă™]ęBÇőŕ!`żC0ęŔ#7Ó…T‘ĘĺŁßÉčů16źÖŇog«Ą×}Z ¸ăläŔġj~Ž;|UČĄ|ęí)<7\ĽPz˙”c6­Ęı_>Ç_xď·]ÇŐ}X±ŚďZP|R*•P©TĐéthlläÂ^„,Ĺ׋ ź±ŕ™$ë´},@ ;^jŠ; TĘMˇ; 4v»Wý5,}Á»h}yÂČ`a­‡'´ŕí˛uźÚÎ÷ĎZľ<+Ü´şď7ŕÂNlę57¦Ť°ąAŔđ5xşcźŢ¶:¤íŢŤyřôĘM\í¶1Ą¸&‹Ŕ;S–"äąK@DDDDDńÄwlëÎ-ďůN[wÄÜFy–ű«hóX`hénöz>Žď‚±Ű}^°™´ÁřvN܆Ę3BaĽŔ31łëß%qAšć®]Fşą™@Ćß± `ŁŮt‚Ő%‹…1sż›››ŃŇŇ‚GżBŐŃŃÁ"D ˛DDDDDWĆŚrőš"úv{F¸?Zď,ęŤ@żctfj· wÜç›n ĐďźŰ㡤ÂóVE" 7ĂÝÓśšȳܳ^‰‚×…bËtĆôÝŚ™űm±X`0Ë#ü¶Ţwß}súýĘĘĘPVVĆ˙ čőz=zfł “ÉX”Â@–˘šă|Îč“pńT2*++Y"â1‚(Lśön NăŘe!ňW­@IIIdżň[€„$Ŕ9 Ŕ=GÖĺ¸ pą\0v pú\Ŕ?Xőđ¬žÓą™Łç‹<ă<şFŰţ6[žńo4\ ĄLąýűm×˝cVdq#еµµ¨­­ťöőÄb1Ôj5 IĎCX"Šę'ŁV#,VŔÂRŹDaĺ€Ój„Ń ¤$'Düíő[ç0\.w88Řú\#C@€Ö/Ý FůŽVľˇćŘVŔLNĎbd?'3ԅĬĆËţ6[?ŢľĺŻ4ŕ×ucy–dŇ…˝Z/÷`×Kďľ˝>›ócĂD&“!;;;čefł]]]‹ĹP*•A·Ńét,âţ.8‰„oND˛DDDDDw\Cý@ň"¸ď@ @  ťĂc§Ge,apŚg12˘ů¦yp~—÷)>˝rĺŻ4ŕpšó°qU†w›ŢŰhĎ^ÁO_ŕž!űO?ü‹®ß™F3á¨O—§R©ÄÁn3ףb…JĄšÖ•J•JĹÂ…A ąËan.äňŔ?– dcܱcÇĐqů*ÖA$+Š ÎbQǢ0©®®†ĺö·‡!Ú\Ę‚…aĽřFşÚ˝˙ľ#eʼn .@*• ôz=RSS^6ßŇĹ"üé6”Ľô.®uŰđ~ŰuĽßv}Âí=aě¦U™S[‹ĹŁŃ‚‚>ĐbŔ ×ÜżŔ?—kôXŕY;wbŃęŐ—3ŤqFŁĆÎ/¸?ÖBDÄcQřxä ˘…ß߆»Ú0lÔÁi»1aë[řµrľ_Iq ąą‡B~~>ŞŞŞ.×jµ0™L¨¬¬ K(»iU&Îľ\‚źżvÚŹ.Ł÷vđE»ŠĚĂ Ą÷GTk4Q]]Ťţţţ ;B)şčśđ•ËŤ€ź˘ éc KDDDDD1O˛.{7\.—7pő==QP»HčçťR,ó„±SéďďGuuuŘBŮt±µĎ(ňvČ~zĺ&–,bEV6će ],ЍÚú†±{Ú€ˇ,ÍY"""""Šy‰K—#9†:üÎĐŁ\ÎÚ» |ő„˛ľ§‰b™o»sçN…ÂI· …Řąs'Äb1Ş««a4Ăr»?hżŽ_×ý?í ámüüµ3řuÝ_P˙Ńĺ©­o»sçNdeqöI,züńÇݡ¬@-˙vĐtž“°DDDDD/’ď٧­.{÷”ť±p—„ă (6ŤcSRRBş^JJ vî܉ăÇŹ/x§¬öěeüüµ3¸Úmó;ßwŻôĹ"ĽPz?~Ľ}SŘj,Śýâ‹/ř ‹QŹ?ţ8°S–¦…˛DDDDD7É)m| Hů…±łÉlaˇ4Ó0ÖĂĘ.d§ěO_DÉKKűwőZo;đł×Πü•Óa©-;că;eişřô‚⊠e „žÂŕ'GýÎ÷tĚú†łKłÓ‰bËlĂXŹ…ě”˝zŁ寸ç?/Ď’ŕ…ŇűˇypUŔ^Úł—ńŰ“­ř ÝŚ?žľ„o­ĎĆ3Ek¬¶óĆBˇPŕFM ŇůŽX씥é` KDDDDDq'Ř"_ľłd=ˇě"GPě%Śíďď‡Á`zţx Ęţşîcî0öěË%A¬‡fË*h¶¬BŮ˧ńFĂ%ĽT÷ń‚˛ˇ†±Áj @€ŢŃŻ…°dô+ÚÜŕçźŃërAW†˛ä1 @źÉ„ôĹ‹ŽŤ d‰(Ş ďÝ…űWŠń`^‹AD¦Ó;Qm#Ő/Łl߸ŕßâM´2”%Ďcń0Ľů&ňóóQUUĺ˙„%"˘h–¸t92rҡTJY "â1‚(ŚI)H\şň<äËRŁćv{ůrÚnť#›šů/¤/\Ľ‰ü˘ .Z.KOâůďoŔŢź¨˘ö±Ą~¦{˘ÂľżW…´íÖdhüنű›ŹPĂŘm۶ÁḷpÉ’ŕý’óĘZo»oÓ·×ç„´}şX„oŻĎĆífôŢś×ßE¨aěćÍ›lńłąđŮgźáłĎ>›—ď]VV†˛˛˛yůŢŁ˙>ňČ#ČĚśß ^$šřÍů eÍf3şşşćě>Ĺb(•J>‰ ˛DDDDD·<‹|őôp ;BŮÔo.}ű:ń_4ÂÚ7ź~o=ž|t% ÖHŃy݆ -Ř÷[öý«·zxů…Â˙}n}@†‚Ő|Î×tfĆÎfćéBΔŤÓéŚMII‰ŞzĚtˇ¶d2٬~ţąsçfuýĚḚ̌×{>BŮúúzÔÖÖÎŮmT©TŢß-,˛1®¤¤×oáÄ $\Ý‘xŚ §ŠŠ |tĄwÚY ˘Úß)K˛ąů˝ßůîy˛‘Ű!kísŕżh„Ë|ňöwý>¦_&BÁš <ůčJ¨ź©Ç+hÇóßßŇÇţŁY¬wĆN×\-ŕŞů e—gIp­ŰíG—Cš kµ;ĐvĹ2Ż÷uľđŠv*•ŠEđÁń4˛1N.—c@(EâŐĹ,ńAfJĄ_ZŘke1"lK,píüüť Ł "ŃďO`íÄŢź¨&ś™šž&ÂľżWáŮ˙Ý —,Ţ@¶Ódá· řţ“ Ŕˇ· čĽnĂĘ ľ˙¤"hpk퓸áH IDATsŕĐ :Ż»Cî‚5R|˙)EĐźkísŕO§ŻzÇ(L´m¨ŰŤ˙ůéiBüuŃŠ€ű˝˙·:¬ČăŮďň#¸ ĆzĚG(űđúď"]ó2&ť kµ;Pţ/§a˝íŔ’ĹB<Ľ!gÎď#ĂŘąŁ×ëqôčQ466âĎţsLŢÇą 
eU*ŐŚÇ=Řl6Ô××ĂfłyĎ‹Ĺ|† Y""""""I˛ ůú ś7.EĹí}ĺP;ŕŮ LŹ'] ëągýÎëĽnĂľŐáŠÉ†C' X""=M«×íxůPţü‡bż°ó÷˙Ż˙ë7-°ö bÓj)¬6^>Ô†—µá?~łŐoŰ oâ‘ďŐĂÚ79b¤KDŢm˙ü‡bď"cˇn­—,Č+Şón{őşűţU‡˙řÍVżđußżę°őYܲá c=ć:”}ˇô~h?şŚ«Ý6|ç…?Ał%{¶­[{˛/Ő}ěť9ű“í›ćüľ1Śť=O0X__˝^÷y®BY•J5Ł.dťN‡ýű÷ű…±ó9Ë—¦–Ŕą‰Öm…Qq[;ŻŰ°"G<«1‡NpâwŹÁzîYt6ěĆüf+¬}Ř÷Żş±źc˛áżh‰źĽý]\řÓßx·˝pŃ‚ü˘Éď{>őă˙.đç?Łła7.üéoĽŰ>˙RË´·Üłrż˙¤.}:vă“·ż Ř˙Űó|ĐŽî0ÖĂĘŠĹbTWWĎx&)¬X–†cż|K a˝íŔO_Âí×¶ë˝íđ†±?ÖlÄ ĄĚé}b;;MMM¨ŞŞÂ#Ź<‚ęęjż0vëÖ­1˙üq¬[·m´.ׂüL›Í†šš”——Ăl6 >Ě06ĚŘ!KDDDDDä#1»Ă×>ŠčŰŘirw9­Ě‘˝ěĐۆ€ó}”˙űO)đäŁ+˝˙ö»JüŕM°ÚĆV¦ůP`߸ŃĎ~W‰ĆżtáĐ ŢţźN<ůčJĽý?ťčĽnĂŢź¨ ~0;`Űt‰;ěu;ŹM«Ą~‹’¬ÉŔÖdhúK°>"%ŚőËNه7äŕěË%řuÝÇxŁ!xűň, žŢ¶O­žóQ cgĆl6ăčŃŁĐjµ~Ý™€»Űł¸¸jµ‰$.ę±3e=]±ž `Wl$a K4‡\Cpڻᴑ.G¢%¤,aaŕ´Ý€ÓŢ×đÄYH\şśE!šG#·®Á5ĐHg"A˛ŚEˇ#˙e’§+¶×'8őđŚ#/ŘGůş›VKýţá’{ľk°1Ď>ĄŔˇ\¸hÁ“Ź®ô΂U?¸Âúď˙oőŘ÷ q»±Ű8çPý@6Yz˝>˘ÂXŹńˇě?ţă?B*•Îč{­X–†Úç‹Pű|¬vGŔĺĎ­ iŃŻér8ř·ű7†±!˛ŮlhjjB]]]ŔH™L†ŇŇR¨ŐjdggÇe}|CYŃ‹Äc͡‘[×0Řvö±.HY‚ä5ßá›!óu¬łwcČpÇ. ‘żjJJJ˘úţDËÂ^ž`ÓWÁ)ţü‡bżóů^}Đë DÓÓ"odĂD‹–qŰß222°hŃ"ôőőˇŻŻ/bYŢŰ乍łőAűuĽßv׺m¸ÚmĂĆĽ ,Y,ĦU(~pŐśß~‘H©T “É„žž˛“Řż?´Z­ßy2™ jµĹĹĹ   »ŰýşŕnĚa(Ë®ŘčÁ@–h–\Cp|r40hńĽ(ěéŔČ­kHy¨śˇě|<µa±–‚"ÔHO‡; fŘÁ¶HüĆz×>ÁbńAs`ŘxC Á˙fôbđ“ŁHÎ߆$ů},Ö<<'rZŤ0Z”d.U±ţşhţtúŞw\€GzšČo ĹÇţ&•JQYY‰ęęj?~`Ű?ťîÄŠŃŕv˛íš>î¦ŐwńAÍiŢ„ux€­MśŃhdĽ9溯¸ük :ţ 'Ž7öŢ[ى Ç®}zäýč/‰ ò4Ű‚Ig/ö ·Ď¶č×ë//EéŹU“úąO%`ËÝĹ»Ë;‚Ľ•‹ĐkDUM zű†pt˙÷ŔÖ.᏿ĘÁ+?ݶ€ŠŁX­Ŕ{ofy´Ýdź+ŮŘ“˛3Őľ`*“±đ‹C_°%cOľ·1âp·Ű©źNúéĽw>»„_újĘů˛'eŮľŔ=‰DµZ µZ “É„ęęjčt:tttŔ`0Ŕ`0 ¬¬ ąąąČÍÍENN$I@ÇÄÉXöŠő¬%zV‡ĹBuđ˙ţđ{c&c˝űĂď®u™q®ůĆ”żFVĘz&!!Đét(//ÇęŐ÷¬­­Eii)Ö¬Y]»vˇ®®. c0ŐÉXVĹVČMŁ 9Ń Ѭú+î˛Ę;y‡(Âłż±NUłü»LcŃ Ţ{+Űă헥ŢöµŰŰĆzś­ë”غγ~„ąO%x´°'ŰŤő<Ý=ź±¶ť­¦»RÖÉXŔÖžYščŃö1âp<ł4_\0áć­!ŻĽVVĘNŚJĄ‚JĄBqq1jkkqčĐ!ŤFÍfčt:čt:ś>}: ^ł7*c:äÔ+°U%k4šI=žBˇ@qq1wĐ™8Td&/Hěüaf¬¶cmODM$–Áz÷´hwóĂčë‚ç%1hD“oˇ@D0Đ7ć=>‰f‰$e˙ţ÷żŁŻŻoĚÇZ°`ŰĹiď%c}ŮD’˛]]]hjjň›×ÖÖćťÖ>Ł[:tµµµčččŇßsá´··{5FáááxüńÇÝŢć­ĽÜŃëőśčü˛D (Fîtđ7î`H8űÇÍ2ˇň'04ŞOĄýtéŃóDPě# Ń~°ŤOÇČÝĹş[‚Śľ €“‰hVń4){ţüy\ż~}ĚÇILLĆ \®÷v26I&Ak—şSÍő„íµ âüŐži‰­§IŮ+W® ˇˇÁŻö›(/?~BBŠ‹‹…ŞŮ© \$Â×_=-1r—ťÎd,ůńçV†€hňDˇK{Cű_Nyî„&˙#OŤ$še‚ç§"(öÜéľ"Ěn GXÚ Ń MţGÜľqYčß<Ö˘^"± ˇÉ˙Č€ѬâiR655%%%.×ďÝ»ýýý.×OGeěłK…Eş2’c‘™2Ěm{-(üÇŃ{kŃ‘ax6=Ń뱝HĄlyyů„_§Óˇşşđ=ĎúÁţf0đÍ7ßŔd2!'' ă·&ÉÍÍť’ß  ŘaŤoůŔ 7×{;Ë…»ő"z@Áó’šöĽ­WäXŰ,P!DţE4 …Ą˝0nő«H,Cřcl§[Ń ĎX QŚ|ě1#GxĆZŠfĄ©^čkşÚĽ•˙$˘#Ăp­ËŚçßú…ż=ŽĎĎ»Vňîűř}őčNÚ۶&sÚbëÍ…ľ˛łłQTT„M2ý`?ÓjµŘ´i“¤^łfŤKŹSťN‡M›6 ‰ć@ŔĘXš&d‰¦@H|:"–oEpüRá P$–!8~)Ÿ܂0Ĺ ‰h–…F ‡ĺ[$‰c ¦jĚED#âń|„¦>gű›DDŮĆ\Úóx<źg¬Ѭ6UIŮéě»0. 
Gâ>¶ˇÂćÁçĎ]łx–=üđĂ®ĘĘJWgg§çÝű–‘‘ás.+++=çlٲe®ĘĘJO[7ďđľ  Ŕsľ++˙˙öî&6Şę˙ăř5 Mf:Zt3j䡣&m S]š–&ŇQ·´ˇ†ˇ,Ä-jšLc\´ÓqÓ΄®Ě”'ŤdF…S„ ˇ˝5”Âüż˙=˙;Ó™ét*üŢŻ¤qz>÷Üsn]|9sî±T H+ĘćzžłŤĄs,ĘĘĘLquűöí¦µµµć@ –m·Ë9Îö=;Ď›éXŕßgÎ ˛vAoş?Ů8g@Ú3ťŐlEßcÇŽeť%isÁ˛oÝşe |Ź'­°ç,Ŕĺ:ßŮąf±:‹ź™÷°}űvsn®|gA33˙~ŠxgÎśI+Ę:‹‘UUU©ýű÷OY u¶-łXlłgšÖÖÖNë\…|gnć¸ć;×YčÍ,ěg;&WćęSçxĺ3»}ŹÇl»téRŢgÄyžł €‡Óśdgú“ÍĄK—&ÍŚ´‹yĹĹĹY ~ů ˛ÓýJ˝3ĂY,sds÷ś}đĘ+Żä<Ć9ŇéóĎ?O˝ýöŰ“fSföK®"áýÎŞĽuëV*d-Ě:‹íű÷mŮúČąß9Ů.>f›Ąęě{v«Sľ‚¬ýĽ,[¶,ď}ŰEřĚ™˝ÎüTĎJ®1s>7S=c™ŮöĚY<Üžë5j‹‹‹µzőęűĘ(++Sgg§6mÚ¤ˇˇ!=˙üóćĺHťťť*++›Q^86źëęęrWUUĄââbŤŽŽęřńăćĄa™ÇL%ß1«WŻV$™´}Ďž=óş¶°ÇăQgg§FFFtüřqócż8K’†††ÔÜܬÎÎN;vLŹ'mĚjkk‰D‰D´˙ţ´|ű÷âââ´1(++Ó‰'Ě‹·>üđCŐÖÖŢWßŚŚŚ—qĺo{˙ďż˙n^ÄćĽ'{Ľ¦óĽN—óűä“OôÇ(¤eLçŔĂaÎ ˛«WŻN{űülŐŐŐ™ź]ŚÝľ}ű”¶lśíů裏ň»`ÁI2×Ě”Y°›í1S9q℆††444¤ älÓäńxTWWgúyddDápXÇŹWWW—$i``@ëׯי3g&Ť™=^iĹL»ťYännnV8Öčč¨Âá°Âá°<ŹŞŞŞTUUĄÚÚÚŕíb¬}]gQ9“łO&C§sí™ţDssłľřâ ŤŚŚhĎž=ÚłgŹĘĘĘTUUež{<˛dtčС¬'466jůňĺć÷ .¨ŁŁ#çîÝ»§ńńqIR0ÔĹ‹gťY^^ž6›tË–-úꫯ˛fVVVšĎ:{ö¬É<ţĽŮ7ÝbńůóçuöěY-_ľ\wďŢ5Ű;;;uřđá´c_xá555™ßďŢ˝›łťŹ?ţ¸ů<>>žvď7nÜĐéÓ§s^ź~úiÝşuK’Ô××§k×®™ţt^ŰyďůÚ9ť1zę©§´yóf­]»V˙üóŹ~řáÝĽyÓ/ß{ď=“ąyóf577kttT›7o6ĹÍ«WŻš{Ú˛e‹y>.\¸ C‡Éď÷ëÔ©Sć~ě"p8VssłŢzë-•——gŁĚgîÂ… joo7ÇŘEíéhoo×áÇÓúł´´4k^˝zŐ|^´hQÖgţ·ß~3ÇŘĎ’$}öŮg×Áő÷ß›vvvvŞłłSŹG555úňË/ĺńxîűďh6ăN&™d’I&™d’I&™d’I&™d’ů¨e~ýő×Z·nťü~˙üdŻ\ą"Izíµ×&í›0űíßł÷í·ßN:~ĹŠzîąçfťůÍ7ߤýţńÇkďŢ˝93mׯ_WII‰É­žžłŻ¨¨H/żü˛***äőzĺóůtďŢ=Ő××›Bˇ}Î{ÍŮź™íŚĹb:}ú´&&&ôÎ;﨨¨(ď˝űî»ZłfŤ>ýôSIŇ“O>©+V¤·aĂőööjxxŘ´á»ďľ“$­\ąRĎ<óŚ9ŢŮΚšSľxń˘âń¸ÉíرcÖÁ«V­’$UTT¨»»űľÚŐŐĄÖÖVI’ßďWż©‚gV·íâbCC$) ĄÍÝąs§z{{%IçÎť›q[‚Á ©´ç;ßî\mĚ—őꫯʲ,ą\.8p ­ýÓąĆt®ť©§§G»ví’$8p@ŐŐŐÓ:Ďnk¶±N$fą;Ó>ľĄĄE@`ÚýŤFŐÓÓcĆľ˛˛RˇPČěßşu«âńř¤v8ź…¶¶6SÄžÍó<›çm¦ĎŤÍ˛,EŁQutt(™LJŇŚű ٵ··kăĆŤŞ©©)ču˶±¦¦F»wďţ×tÎđđ°)dUTTčŕÁf*qGG‡b±ŘŚň|>źůŤFóŰŃŃˇŽŽ%‰‚Ýo,“eY’¤M›6ĺ,ĆÎôľdżŘ,Ë2m­¨¨Čši//ŤFŤFÓî-3«··7gWWWëŕÁć:Ó˝g˙Mu_±XLęíí5í,Äłm˙c“ŰíV}}}ÚK膇‡ůż%ŔC챇ˇ‘ŤŤŤf¶h[[›$ißľ}rą\’¤]»vͨxć\Â9Ă2SOOŹ‚Á ‚Á`ÚWć ©´´4çľ|mź źĎgŠť˝˝˝ćĄ]ůŘă!e/ČJ23RűűűMŐď÷ËívO:vçÎť iË5d˛ÇÝţďtŘĹßţţţśĹu˲´k×.3ćŮÚ÷ ő÷÷+ Ş««kZćB´ sgÎ ˛cccŠÇă3úqWÁ ) 555™ĄŰíÖľ}ű$ýw˝ga0“ýw[ii©ůzy,Ó¶mŰ&tc±Étą\“fsÎ%çLU{i…L­­­ÓžĹ:---¦ĐŮÚÚŞ†††IłE-ËRżLá´˘˘"çL^»ď,Ë2ł=ł-ŕv»M±< e˝żh4jĆ3×’ ÉdrŇLR狹&?-ËRCCYŔyü\r>WmmmYgŔN§č €‡Ăs}D"ˇ­[·Îč{ ÎD"‘¶TAćÚ™ŐŐŐf=Ůžžůý~S¤«¬¬”ËĺŇŘŘşşşÔŐŐ%Ż×«ŁGŹJ’vďŢ­x<®ÁÁAEŁQĹăqů|>ą\.%“ISvą\ęîî.čĚD·Ű­††…B!% mذAŐŐŐr»ÝJ&“ćk˙öRlIźĎ§––µ¶¶jllL±XlĘ™›ĺĺĺ:pŕ@Ţű±ÇÉžéś«ÚŇҢx<®±±1m۶M>źO^ŻW’ŇĆĹëőŞĄĄeR;âń¸’ÉdZa·˛˛RĄĄĄjkk3ł©äóůäóůL®s)…٬3;۱¶Ű•H$ä÷űUYY)Ż×k Ëv‘8ßň™>ř@/ľřbÁŻ›u†ěąsçôăŹ?Îk‡X–Ąm۶IRÚR™ň-]Y°ł [¶H$˘ĆĆFą\.Y–ĄX,¦h4jŠ~öˡś3V Ą©©É“ɤşşşĚWůS©”‰DLŰ~ůĺ—¶ć©˝né¦M›ň. `E#‘Č”kg3ßlăŇŇRuww›™ ‰D¬=kŹ‹ßďW(štͦ¦&S¤¶9—š¨ŻŻW(Jřčď8IDATËîéé1köş\.566š™×…R__ݶ¶6Ó×±XĚ´+™LÎ[»eË–-SIIIÁŻ» •JĄ279rD}}}Ú±cǬďç…Sv‘Ń.ŔąÝîĽEQç,BŻ×›¶îŞeYiłGó˝ kppP–eÉëőšŮ“S]/ߌE»2Ű4“¬ááa3ëÓív«ĽĽ<í8çýů|>S¤śÎµg2–ccciłSóőO6‰DBuuu’¤p8<­sť÷.ý·@îĽÇ|ײ‹Óąî¦ŮSőg®qésă|łŤ7Ś… jéŇĄżîśd§˝{÷* ©ĽĽ\‘H„ŔĽšŻ‚ěct=ćšóe^™ëóáňĺËşyófÁŻű]Źą‹Ĺ´`ÁłţíT/ó ©»»[7nTMMMAŻKAs"Ź«ŁŁ#mŰľ}ű¦\˙x”e-Č®\ąRoľů&˝Ys^˝^Ż™ €˙yY ˛«V­Ň˘E‹tçÎzłdYłb€˙ĂK˝0§(Ć˙Ź‚,€˙9K–,QIIIÁŻ» •JĄ˛í¸|ů2Kx$-\¸PK—.-řułÎ=räöîÝ˨ŔÄ’P d @žČ·sxxXĄĄĄiŰ,Ë’eY“Žu»Ýr»Ý“ÎφL2É$“L2É$“L2É$“L2É$“L2É$s>3Oť:Ąőë×ëő×_ź˙‚lQQ‘$éűďżWkkkÚľ3gÎččŃŁ“ÎٰaŞ««Ó¶µ··g˝(™d’I&™d’I&™d’I&™d’I&™d’Ić|gŽŹŹ«Đ¤R©T¶W®\ŃíŰ·µjŐŞ´í7oŢÔŤ7&żxńb•””¤m;wî\Ö‹’I&™d’I&™d’I&™d’I&™d’I&™dÎgfQQ‘–,Yňď)Č,^ęBA „‚,Y(˙µ}?;á@ďIEND®B`‚ceilometer-6.0.0/doc/Makefile0000664000567000056710000001402312701406223017210 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. 
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html        to make standalone HTML files"
	@echo "  dirhtml     to make HTML files named index.html in directories"
	@echo "  singlehtml  to make a single large HTML file"
	@echo "  pickle      to make pickle files"
	@echo "  json        to make JSON files"
	@echo "  htmlhelp    to make HTML files and a HTML help project"
	@echo "  qthelp      to make HTML files and a qthelp project"
	@echo "  devhelp     to make HTML files and a Devhelp project"
	@echo "  epub        to make an epub"
	@echo "  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
	@echo "  text        to make text files"
	@echo "  man         to make manual pages"
	@echo "  texinfo     to make Texinfo files"
	@echo "  info        to make Texinfo files and run them through makeinfo"
	@echo "  gettext     to make PO message catalogs"
	@echo "  changes     to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck   to check all external links for integrity"
	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"
	@echo "  wadl        to build a WADL file for api.openstack.org"

clean:
	-rm -rf $(BUILDDIR)/*

html: check-dependencies
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

# Fail fast with a clear message when the optional doc-build dependencies
# (sphinxcontrib-httpdomain and libtidy) are missing.
.PHONY: check-dependencies
check-dependencies:
	@python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1)
	@ld -ltidy >/dev/null 2>&1 || (echo "Error: Missing libtidy dependencies. Pls. install libtidy with system package manager" && exit 1)

wadl:
	$(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl
	@echo
	@echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ceilometer.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ceilometer.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Ceilometer"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ceilometer"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
ceilometer-6.0.0/LICENSE0000664000567000056710000002363712701406223016023 0ustar jenkinsjenkins00000000000000
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.
ceilometer-6.0.0/ChangeLog0000664000567000056710000034443312701406363016571 0ustar jenkinsjenkins00000000000000
CHANGES
=======

6.0.0
-----

* collector: never allow to lose data
* Imported Translations from Zanata

6.0.0.0rc2
----------

* abort alarms URLs when Aodh is unavailable
* Imported Translations from Zanata
* Imported Translations from Zanata
* remove dns and trove from entry_points
* Imported Translations from Zanata
* Remove gabbi tests that check content-location
* Imported Translations from Zanata
* Update .gitreview for stable/mitaka

6.0.0.0rc1
----------

* Imported Translations from Zanata
* add rc1 release notes
* Use assertIn and assertNotIn
* core status cleanup
* tests: remove ceilometer-api bin test cases
* gate: add missing sudo
* change dns and trove notifications to declarative
* Remove en_GB translations
* register the config generator default hook with the right name
* Imported Translations from Zanata
* Updated from global requirements
* tempest: migrate api and scnario tests from tempest
* mitaka-3 release notes
* Adjust log levels for InstanceShutOffException
* Fix event_type creationg failure due to race condition
* Imported Translations from Zanata
* Ignoring cpu measurement when instance's state is SHUTOFF
* Add validation for polling_namespaces option
* xenapi: support the session when xenserver is slave
* Imported Translations from Zanata
* gnocchi dispatch: Added new resource type support
* remove wrong "#!/usr/bin/env python" header
* Fixed corner cases of incorrect use of oslo.config
* Updated from global requirements
* timedelta plugin for meter definition process
* Cast Int64 values to int, float in statistics
* Cache getters for the decalarative definitions

6.0.0.0b3
---------

* [sahara] add events definitions regarding new notifications
* Moved CORS middleware configuration into oslo-config-generator
* Add the meter example file 'lbaas-v2-meter-definitions.yaml'
* Change default policy to allow create_samples
* Enable the Load Balancer v2 events
* Remove unused pngmath Sphinx extension
* Updated from global requirements
* Fix a minor missing parameter issue
* close services in test
* Add an update interval to compute discovery
* Docs: Configure meters/events dispatch separately
* Fix the typo in the gnocchiclient exception
* Updated from global requirements
* Add gnocchi dispatcher opts to config
* Change the SERVICE_TENANT_NAME to SERVICE_PROJECT_NAME
* Hyper-V: replaces in-tree hyper-v utils usage with os_win
* Initial seed of hacking
* Add /usr/local/{sbin,bin} to rootwrap exec_dirs
* Gnocchi: fix ResourcesDefinitionException for py3
* Change LOG.warn to LOG.warning
* tests: fix unworking debug output
* Adds timestamp option to Aggregation transformer
* remove default=None for config options
* Replace assertEqual(None, *) with assertIsNone in tests
* Trivial: Cleanup unused conf variables
* Enable the Load Balancer v2 for the Ceilometer(Part Two)
* Remove unused variable
* Enable the Load Balancer v2 for the Ceilometer(Part One)
* Fix footnote reference to Aodh in docs
* Updated from global requirements
* Set None explicitly to filter options
* KEYSTONE_CATALOG_BACKEND is deprecated
* Use overtest to setup functional backends
* devstack: Fix Keystone v3 configuration typo
* Imported Translations from Zanata
* Handle malformed resource definitions gracefully
* Update the home page
* Skip duplicate meter definitions
* set higher batching requirement
* use retrying to attempt to rejoin group
* network: remove deprecated option name
* sample: remove deprecated option name
* Fix wrong capitalization
* rewriting history
* Remove unused pytz requirement
* devstack: use password with version discovery
* fix tempest path
* Updated from global requirements
* raise coordination error if not registered
* do not configure worker specific items in init
* integration-gate: fix publicURL retrieval
* rolling upgrades
* fix locking in ceilometer
* enable notification agent partitioning
* better support notification coordination
* remove useless notification listener helper
* Lookup meter definition fields correctly
* Enhances get_meters to return unique meters
* Imported Translations from Zanata
* Updated from global requirements
* Fix ceilometer floatingip pollster
* Updated from global requirements
* tempest: migrate base class for tests
* tempest: add ceilometer tempest plugin
* tempest: add telemetry client manager
* tempest: migrate conf.py from tempest tree
* tempest: copy telemetry client from tempest tree
* Fix events rbac

6.0.0.0b2
---------

* Don't store events with Gnocchi
* add additional mitaka-2 release notes
* Corrects typo "a other" -> "another"
* Updated from global requirements
* add release notes for mitaka-2
* devstack: add support for Gnocchi backend
* notification: Use oslo.messaging batch listener
* Cleanup of Translations
* Added CORS support to Ceilometer
* Don't set keystonemiddleware cache
* Set None explicitly to filter options
* Add OSprofiler-specific events definitions
* collector: Use oslo.messaging batch listener
* Updated from global requirements
* Changes aggregator transformer to allow retention_time w/o size
* Replace LOG.warn with LOG.warning
* Updated from global requirements
* wrong accumulative value of "network.services.lb.incoming.bytes"
* Trivial: Remove vim header from source files
* Trival: Remove unused logging import
* Fix the typos in the source code
* gnocchi: fix stack resource type
* Misspelling in message
* Clean pagination related methods of impl_mongodb
* Fix some typos in the snmp.py
* remove local hacking check
* [MongoDB] add indexes in event collection
* Remove unused code in gnocchi dispatcher
* remove unnecessary code
* recheck cache after acquired gnocchi_resource_lock
* collector: remove deprecated RPC code
* fix case in function name
* Catch the EndpointNotFound in keystoneauth1 than in keystoneclient
* Log exception if stevedore fails to load module
* Updated from global requirements
* Revert "Revert "devstack config for dogpile cache""
* add per resource lock
* verify gnocchi connection before processing
* [refactor] remove redundant import of options
* Added unit test cases for pysnmp 4.3
* Add keystoneauth1 in requirements
* gnocchi: fix cache hash logic
* gnocchi: use gnocchiclient instead of requests
* show queue status on integration test
* Updated from global requirements
* using a consistent uuid as cache namespace
* Duplicate information link for writing agent plugins
* Use keystoneauth1 instead of manual setup
* Do not mock the memcache interface for auth_token
* oslo.messaging option group/name change for notification topics
* Correct the host field of instance metadata
* fix the bug that gnocchi dispatcher can't process single sample
* Replace stackforge with openstack
* MAINTAINERS: remove outdated data

6.0.0.0b1
---------

* Remove version from setup.cfg
* add initial release notes
* fix functional gate
* messaging: stop using RequestContextSerializer
* Fix ceilometer-test-event.py script
* Deduplicate the code about snmp meter loading
* Updated from global requirements
* Revert "devstack config for dogpile cache"
* Revert "Workaround requests/urllib connection leaks"
* add cpu.delta to gnocchi resources
* simplify collector cache
* Consistent publisher_id for polling agent
* build metric list on init
* re-implement thread safe fnmatch
* clean up integration test urls
* tools: fix default resource metadata for instance
* don't pass ceilometer options to oslo.db engine facade
* Use str(name) instead of name.prettyPrint()
* Reduce code duplication
* remove config files when run clean.sh
* fix some test case wrongly skipped for mysql backend
* Add WebTest to test-requirements.txt
* tests: remove testscenario usage for storage drivers
* Remove eventlet usage
* Remove alarming code
* Clarify the doc about multiple notification_topics usage
* Reduced source code by extracting duplicated code
* devstack config for dogpile cache
* Updated from global requirements
* Updated from global requirements
* Fix an indent nit of enforce_limit method
* Move the content of ReleaseNotes to README.rst
* use common cache
* A dogpile cache of gnocchi resources
* Updated from global requirements
* install database when collector is enabled
* Updated from global requirements
* Updated from global requirements
* add reno for release notes management
* Updated from global requirements
* Support to get hardware's cpu_util from snmp
* add rohit_ to MAINTAINERS
* gnocchi: set the default archive policy to None
* Mv gabbi_pipeline.yaml into test directories
* Factorize yaml loading of declarative stuffs
* Factorize field definition of declarative code
* Wrong result is returned when call events getting API
* tox: use pretty_tox in most places
* Updated from global requirements
* avoid unnecessary inner join in get_resources() for SQL backend
* Add sql-expire-samples-only to option list
* Updated from global requirements
* configure Apache only when ceilometer-api is enabled
* Imported Translations from Zanata
* avoid using isolation level
* specify runtime environment for scripts
* Using oslo-config-generator to instead of generate-config-file.sh
* Use gnocchiclient for integration script
* Enable signature verification for events
* Correct the timestamp type when make test samples data
* Updated from global requirements
* avoid generate temporary table when query samples
* Reject posting sample with direct=true if Gnocchi is enabled
* make script under tools directory executable
* Updated from global requirements
* Added the README.rst in devstack folder
* fix tools/make_test_event_data.py
* fix image_ref attr in gnocchi resource
* support mysql+pymysql in functional test
* Updated from global requirements
* Block oslo.messaging 2.6.1 release
* reset policy per test
* Remove dependency on sphinxcontrib-docbookrestapi
* gnocchi: remove possible ending / in URL
* api: simplify root controller
* api: simplify Pecan config
* remove instance:FLAVOR related code and docs
* Do collector setup and storage cleanup for all backends
* change collector_workers to [collector]workers
* Enable POST samples API when gnocchi enabled
* devstack: fix debug info for Gnocchi
* Imported Translations from Zanata
* Add Liberty release note link
* Fix make_test_data.sh
* Imported Translations from Zanata
* Be explicit when copying files to /etc/ceilometer
* Deprecate event trait plugin 'split'
* Updated from global requirements
* Clean some log messages when polling neutron resources
* Simplify the validation of required fields of pipeline source
* doc: service enablement not necessary when using Devstack plugin
* Skip bad meter definitions instead of erroring out
* Remove the unused network_get_all method
* mark logging.info translation accordingly
* logging cleanup
* Updated from global requirements
* Remove last vestiges of devstack from grenade plugin
* Add missing ceilometerclient repo location

5.0.0
-----

* Imported Translations from Zanata
* Fix for resource polling warnings
* SQL: Fix event-list with multiple trait query filters
* Fix the bug of "Error spelling of a word"
* Imported Translations from Zanata
* SQL: Fix event-list with multiple trait query filters
* Fix a mistake in a test
* Configure collector to only record meter or event
* Rename list_events tests to list_samples tests
* fix elasticsearch script reference
* Fix the deprecation note in meter.yaml
* Fix the deprecation note in meter.yaml
* Remove deprecated archive policy map for Gnocchi
* Remove enable_notification.sh
* Parametrize table_prefix_separator in hbase
* Imported Translations from Zanata
* fix typo in storage/impl_sqlalchemy
* devstack: install all configuration files from etc/
* dispatcher: remove deprecated CADF code in HTTP
* mongodb: remove deprecated replica_set support
* Ensure the test data sample has correct signature
* Open Mitaka development

5.0.0.0rc1
----------

* gnocchi: Don't raise NotImplementedError
* Add missing meter and exchange opts
* Imported Translations from Zanata
* Add test to cover history rule change
* Workaround requests/urllib connection leaks
* integration tests: additional debugging infos
* Coordinator handles ToozError when joining group
* Don't create neutron client at loadtime
* Delete its corresponding history data when deleting an alarm
* update event filter test to validate multiple trait args
* Fix variable typos
* Updated from global requirements
* Change ignore-errors to ignore_errors
* Fix reconnecting to libvirt
* remove batch processing requirement from arithmetic transformer
* Cleanup empty dirs from tests
* retain existing listeners on refresh
* Override dispatcher option for test_alarm_redirect_keystone
* [ceilometer] Update links to Cloud Admin Guide
* Adds support for dynamic event pipeline
* Updated from global requirements
* Imported Translations from Zanata
* pollster/api now publish to sample queue
* tox: generate config file on test run
* tox: Allow to pass some OS_* variables
* Refactor keystone handling in discovery manager
* Use make_sample_from_instance for net-pollster
* apply limit constraint on storage base interface
* gnocchi: add two new resources
* Fixed tox -egenconfig Error
* Add declarative meters to developer docs
* add delta transfomer support
* do not recreate main queue listeners on partitioning
* Validate required fields in meter definition
* deprecate cadf_only http dispatcher
* Fix the heavy time cost of event-list
* Update API Doc to deprecate the alarming part
* Deprecate config options of the old alarming functionality
* update architecture documentation
* Add attribute 'state' to meter metadata when source is polling
* doc: update devstack usage
* Remove useless base class
* Split out image non-meters
* Make the gabbi tox target work with modern tox
* Avoid 500 errors when duplicating limit queries
* Correct test_list_meters_meter_id to work with py3
* Updated from global requirements
* Update event_definitions for Cinder Image Cache
* Update install docs
* Use b64encode to replace of encodestring
* Prevent ceilometer expirer from causing deadlocks
* remove duplicate log exception message
* Spelling mistake of comment in api/controllers/v2/query.py
* Fix typos in gnocchi.py and converter.py
* Updated from global requirements
* Updated from global requirements
* Add a py34-functional tox target
* doc: update notification_driver
* polling: remove deprecated agents
* Fix string in limit warning
* Typo fixing
* missed entrypoint for nova_notifier removal
* Imported Translations from Transifex
* Fix links in README.rst
* integration: Add debugging information
* deprecate db2 nosql driver
* devstack: add new option to support event-alarm
* Sync devstack plugin with devstack:lib/ceilometer
* Updated from global requirements
* remove old nova_notifier processing code

5.0.0.0b3
---------

* restrict admin event access
* Migrate the old snmp pollsters to new declarative pollster
* Support to load pollsters extensions at runtime
* Added snmp declarative hardware pollster
* Requeuing event with workload_partitioning on publish failure
* Event filtering for non-admin users
* integration: fix typo
* gnocchi: cleanup instance resource definition
* Updated from global requirements
* Adding pradk to MAINTAINERS
* Adding liusheng to MAINTAINERS
* Add index to metadata_hash column of resource table
* Incorrect Links are updated
* Removing unused dependency: discover
* Use new location of subunit2html
* Change tox default targets for quick use
* Fixed identity trust event types
* gnocchi: quote the resource_id in url
* fix metadata for compute cpu notifications
* support custom metadata
* Move profiler meters to yaml
* Control Events RBAC from policy.json
* Events RBAC needs scoped token
* make telemetry sample payloads dictionaries
* Fix requeue process on event handling error
* allow configurable pipeline partitioning
* Keep the instance_type meta from polling and notification consistent
* Add user_id,project_id traits to audit events
* Change json path's to start with $. for consistency
* Add validation tests for arithmetic, string and prefix expressions
* Fix description for "Inapt spelling of 'MongoDB'"
* Create conf directory during devstack install phase
* support custom timestamp
* Add cpu meters to yaml
* Fix description for "Incorrect spelling of a word"
* integration: add some new tests
* Fix disable_non_metric_meters referencing
* Update tests to reflect WSME 0.8 fixes
* remove jsonpath-rw requirement
* Do not use system config file for test
* gnocchi: move to jsonpath_rw_ext
* Updated from global requirements
* Allow to run debug tox job for functional tests
* Use jsonpath_rw_ext for meter/event definitions
* preload jsonpath_rw parsers
* integration test: adjusts timeout
* integration test: failfast
* Updated from global requirements
* Avoid recording whole instance info in log
* Fix dependency for doc build
* Mark record_type in PaaS Event Format doc as optional
* full multi-meter support
* add flexible grouping key
* Corrected test_fallback_meter_path test case
* Add hypervisor inspector sanity check
* handle list payloads in notifications
* xenapi: support the session to "unix://local"
* Introduce Guru Meditation Reports into Ceilometer
* Use start status of coodinator in tooz
* Fixed event requeuing/ack on publisher failure
* Implement consuming metrics from Magnum
* Avoid from storing samples with empty or not numerical volumes
* use union all when building trait query
* Fixed spelling error, retreive -> retrieve
* Use min and max on IntOpt option types
* Update install docs with gnocchi dispatcher info
* Make it possible to run postgresql functional job
* Revert "Remove version from os_auth_url in service_credentials"
* Updated from global requirements
* Use oslo_config PortOpt support
* integration: chown ceilometer directory properly
* add mandatory limit value to complex query list
* add test to validate jsonpath
* Remove version from os_auth_url in service_credentials
* do not translate debug logs
* Updated from global requirements
* Grenade plugin using devstack plugin for ceilometer
* remove alembic requirement
* Convert instance, bandwidth and SwiftMiddleware meters
* Change and move the workers options to corresponding service section
* Drop the downgrade function of migration scripts
* start rpc deprecation
* support multiple-meter payloads
* add poll history to avoid duplicate samples
* Add Kilo release note reference
* initialise opencontrail client in tests
* Make ConnectionRetryTest more reliable
* Correct thread handling in TranslationHook
* Updated from global requirements
* Correctly intialized olso config fixture for TestClientHTTPBasicAuth
* Don't start up mongodb for unit test coverage
* disable non-metric meter definitions
* Cast Int64 values to float
* Convert identity, sahara and volume to meters yaml
* Enable entry points for new declarative meters
* Fix for rgw still throwing errors
* group pollsters by interval
* Revert "Revert "remove instance: meter""
* api: fix alarm deletion and update
* Fixes the kafka publisher
* Sync devstack plugin with devstack:lib/ceilometer
* integration: use the right user in gate
* Imported Translations from Transifex
* Initial separating unit and functional tests
* Stop using openstack.common from keystoneclient
* minimise scope of hmac mocking
* Updated from global requirements
* gnocchi: retry with a new token on 401
* Fix some gabbi tests
* Improve comments in notification.py
* mongo: fix last python3 bugs
* postgres isolation level produces inconsistent reads
* Masks messaging_urls in logs during debug mode
* Corrected unit of snmp based harware disk and memory meters
* Provide base method for inspect_memory_resident
* Fix Python 3 issue in opendaylight client
* Fix more tests on Python 3
* Remove the compute inspector choice restriction
* [MongoDB] Refactor indexes for meter and resources
* tests: add an integration test
* Fix WSGI replacement_start_response() on Python 3
* gnocchi: reduce the number of patch to gnocchi API
* Make the partition coordinator log more readable
* Drop out-of-time-sequence rate of change samples

5.0.0.0b2
---------

* [MongoDB] Use a aggregate pipeline in statistics
* Instance Cache in Node Discovery Pollster
* Instance Caching
* Imported Translations from Transifex
* fix gnocchi resources yaml
* Import the api opt group in gabbi fixture
* Add a batch_polled_samples configuration item
* Remove redundant comma
* storage: deprecates mongodb_replica_set option
* Improves send_test_data tools
* Replace isotime() with utcnow() and isoformat()
* distributed coordinated notifications
* Imported Translations from Transifex
* Close and dispose test database setup connections
* Updated from global requirements
* api: Redirect request to aodh if available
* api: return 410 if only Gnocchi is enabled
* Fix broken IPMI agent
* add mandatory limit value to meter list
* add mandatory limit value to resource list
* add mandatory limit value to event list
* Move gnocchi resources definition in yaml file
* Send a notification per sample, do not batch
* Handles dns.domain.exists event in Ceilometer
* Pollsters now send notifications without doing transforms
* Imported Translations from Transifex
* Switch to the oslo_utils.fileutils
* Updated from global requirements
* Use choices for hypervisor_inspector option
* The product name Vsphere should be vSphere
* Add necessary executable permission
* Store and restore the xtrace option in devstack plugin
* gnocchi: Remove useless resources patching
* add Trove(DBaaS) events
* Set conf.gnocchi_dispatcher.url explicitly in tests
* Declarative meters support
* Stop the tests if backend hasn't started
* Delay the start of the collector until after apache restart
* Clean the re-implemented serializers in Ceilometer
* monkey_patch thread in tests
* make notifier default event publisher
* Fix gnocchi DispatcherTest tests
* Sort metric data before grouping and processing
* Namespace functions in devstack plugin
* Added valid values of operator to response body
* gnocchi: fixes the instance flavor type
* gnocchi dispatcher: fix typo in stevedore endpoint
* Imported Translations from Transifex
* Tolerate alarm actions set to None
* Make ceilometer work correctly when hosted with a SCRIPT_NAME
* Implementation of dynamically reloadable pipeline
* fix log msg typo in api utils
* Updated from global requirements
* Add documentation about the usage of api-no-pipline
* drop deprecated pipeline
* Improve doc strings after changing method for index creation
* set default limit to meter/sample queries
* collector: fix test raising error
* Remove test-requirements-py3.txt
* remove unused event query
* Create a devstack plugin for ceilometer
* Add support for posting samples to notification-agent via API
* restore long uuid data type
* Revert "Add support for posting samples to notification-agent via API"
* Update alarm history only if change in alarm property
* test error log - catch dummy error
* fix kafka tests from flooding logs
* catch warnings from error tests
* remove unused notifier
* Add support for posting samples to notification-agent via API
* Stop dropping deprecated tables while upgrade in mongodb and db2
* Add handler of sample creation notification
* Remove the unused get_targets method of plugin base
* Replaces methods deprecated in pymongo3.0
* add oslo.service options
* Restricts pipeline to have unique source names
* drop use of oslo.db private attribute
* Fix oslo.service configuration options building
* Add fileutils to openstack-common.conf
* disable non-metric meters

5.0.0.0b1
---------

* Remove unnecessary executable permission
* Imported Translations from Transifex
* Switch to oslo.service
* Remove unnecessary wrapping of transformer ExtentionManager
* Port test_complex_query to Python 3
* Fix expected error message on Python 3
* Fix usage of iterator/list on Python 3
* Replaces ensure_index for create_index
* pip has its own download cache by default
* For sake of future python3 encode FakeMemcache hashes
* Make acl_scenarios tests' keystonemiddleware cache work flexibly
* Update version for Liberty
* Gnocchi Dispatcher support in Ceilometer

5.0.0a0
-------

* Updated from global requirements
* Fix alarm rest notifier logging to include severity
* Remove useless execute bit on rst file
* Fix unicode/bytes issues in API v2 tests
* Fix script name in tox.ini for Elasticsearch
* Fix the meter unit types to be consistent
* tests: use policy_file in group oslo_policy
* Fix publisher test_udp on Python 3
* Fix Ceph object store tests on Python 3
* Port IPMI to Python 3
* Port middleware to Python 3
* [elasticsearch] default trait type to string
* Updated from global requirements
* Lower down the range for columns which are being used as uuid
* Sync with latest oslo-incubator
* Fix testing of agent manager with tooz
* Remove deprecated Swift middleware
* add DNS events
* Handle database failures on api startup
* Fix more tests on Python 3
* Switch to using pbr's autodoc capability
* Remove old oslo.messaging aliases
* Remove useless versioninfo and clean ceilometer.conf git exclusion
* Register oslo_log options before using them
* Add running functional scripts for defined backend
* Remove snapshot.update events as they are not sent
* WSME version >=0.7 correctly returns a 405
* TraitText value restricted to max length 255
* Cause gabbi to skip on no storage sooner
* Updated from global requirements
* Move eventlet using commands into own directory
* adjust alarm post ut code to adapt to upstream wsme
* Disable rgw pollster when aws module not found
* Fixes DiskInfoPollster AttributeError exception
* remove useless log message
* use oslo.log instead of oslo-incubator code
* Port test_inspector to Python 3
* Fix usage of dictionary methods on Python 3
* Imported Translations from Transifex
* Add oslo.vmware to Python 3 test dependencies
* Optionally create trust for alarm actions
* Remove iso8601 dependency
* Enable test_swift_middleware on Python 3
* Enable more tests on Python 3
* Skip hbase tests on Python 3
* Clear useless exclude from flake8 ignore in tox
* Remove pagination code
* Stop importing print_function
* Remove useless release script in tools
* Remove useless dependency on posix_ipc
* Remove exceute bit on HTTP dispatcher
* Remove oslo.messaging compat from Havana
* Fixing event types pattern for Role Noti. handler
* Mask database.event_connection details in logs
* Switch from MySQL-python to PyMySQL
* Python 3: replace long with int
* Python 3: Replace unicode with six.text_type
* Python 3: generalize the usage of the six module
* Update Python 3 requirements
* Python 3: set __bool__() method on Namespace
* Python 3: encode to UTF-8 when needed
* Python 3: sort tables by their full name
* Python 3: replace sys.maxint with sys.maxsize
* Initial commit for functional tests
* Update a test to properly anticipate HTTP 405 for RestController
* proposal to add Chris Dent to Ceilometer core
* rebuild event model only for database writes
* cleanup problem events logic in event db storage
* fix incorrect docstring for dispatcher
* Imported Translations from Transifex
* api: record severity change in alarm history
* VMware: verify vCenter server certificate
* Add hardware memory buffer and cache metrics
* Make interval optional in pipeline
* Improve ceilometer-api install documentation
* empty non-string values are returned as string traits
* Trait_* models have incorrect type for key
* small change to development.rst file
* Drop use of 'oslo' namespace package
* [unittests] Increase agent module unittests coverage
* stop mocking os.path in test_setup_events_default_config
* Remove py33 tox target
* made change to mod_wsgi.rst file
* ensure collections created on upgrade
* Fix raise error when run "tox -egenconfig"
* Updated from global requirements
* Fix None TypeError in neutron process notifications

2015.1.0
--------

* Have eventlet monkeypatch the time module
* Have eventlet monkeypatch the time module
* Add the function of deleting alarm history
* Updated from global requirements
* Fix valueerror when ceilometer-api start
* Override gnocchi_url configuration in test
* Move ceilometer/cli.py to ceilometer/cmd/sample.py
* Fix valueerror when ceilometer-api start
* remove deprecated partitioned alarm service
* use message id to generate hbase unique key
* gnocchi: fix typo in the aggregation endpoint
* Release Import of Translations from Transifex
* Fix Copyright date in docs
* Replace 'metrics' with 'meters' in option and doc
* use message id to generate hbase unique key
* update .gitreview for stable/kilo
* gnocchi: fix typo in the aggregation endpoint
* broadcast data to relevant queues only
* Imported Translations from Transifex
* fix combination alarm with operator == 'or'
* Updated from global requirements

2015.1.0rc1
-----------

* proposal to add ZhiQiang Fan to Ceilometer core
* Open Liberty development
* Fix a samples xfail test that now succeeds
* Cosmetic changes for system architecture docs
* Fix a issue for kafka-publisher and refactor the test code
* pymongo 3.0 breaks ci gate
* use oslo.messaging dispatch filter
* Further mock adjustments to deal with intermittent failure
* Adds support for default rule in ceilometer policy.json
* Updated from global requirements
* limit alarm actions
* Use oslo_vmware instead of deprecated oslo.vmware
* Remove 'samples:groupby' from the Capabilities list
* Use old name of 'hardware.ipmi.node.temperature'
* Revert "remove instance: meter"
* Tweak authenticate event definition
* Add project and domain ID to event definition for identity CRUD
* Fix the event type for trusts
* reset croniter to avoid cur time shift
* Imported Translations from Transifex
* Avoid a error when py27 and py-mysql tests run in sequence
* Stop using PYTHONHASHSEED=0 in ceilometer tests
* remove instance: meter
* Added ipv6 support for udp publisher
* Remove the unnecessary
* Optimize the flow of getting pollster resources
* support ability to skip message signing
* Avoid conflict with existing gnocchi_url conf value
* Using oslo.db retry decorator for sample create
* alarm: Use new gnocchi aggregation API
* collector: enable the service to listen on IPv6
* minimise the use of hmac
* Typo in pylintrc
* Ceilometer retrieve all images by 'all-tenants'
* fix incorrect key check in swift notifications
* support disabling profiler and http meters
* ensure collections created on upgrade
* Fix common misspellings
* Updated from global requirements
* refuse to post sample which is not supported
* Enable collector to requeue samples when enabled
* drop deprecated novaclient.v1_1
* exclude precise metaquery in query field

2015.1.0b3
----------

* Imported Translations from Transifex
* remove log message when processing notification
* Add gabbi tests for resources
* Fix typos and format in docstrings in http dispatcher
* add ability to dispatch events to http target
* doc: fix class name
* add ability to publish to multiple topics
* make field and value attributes mandatory in API Query
* Fix db2 upgrade in multi-thread run issue
* Add memory.resident libvirt meter for Ceilometer
* Update reference
* Check the namespaces duplication for ceilometer-polling
* Add gabbi tests to explore the Meter and MetersControllers
* Imported Translations from Transifex
* mysql doesn't understand intersect
* order traits returned within events
* add network, kv-store, and http events
* Add support for additional identity events
* Add a Kafka publisher as a Ceilometer publisher
* Fix response POST /v2/meters/(meter_name) to 201 status
* Attempt to set user_id for identity events
* Switch to oslo.policy 0.3.0
* normalise timestamp in query
* Add more power and thermal data
* Updated from global requirements
* Fix formatting error in licence
* Added option to allow sample expiration more frequently
* add option to store raw notification
* use mongodb distinct
* remove event_types ordering assumption
* Add gabbi tests to cover the SamplesController
* api: fix alarm creation if time_constraint is null
* fix log message format in event.storage.impl_sqlalchemy
* Remove duplications from docco
* Tidy up clean-samples.yaml
* Fix a few typos in the docs
* use default trait type in event list query
* fix wrong string format in libvirt inspector
* create a developer section and refactor
* Do not default pecan_debug to CONF.debug
* Adding Gabbi Tests to Events API
* fix config opts in objectstore.rgw
* Updated from global requirements
* support time to live on event database for sql backend
* add an option to disable non-metric meters
* add missing objectstore entry points
* Initial gabbi testing for alarms
* reorganise architecture page
* Add ceph object storage meters
* Use oslo_config choices support
* fix inline multiple assignment
* alarming: add gnocchi alarm rules
* Protect agent startup from import errors in plugins
* Revert "Add ceph object storage meters"
* api: move alarm rules into their directory
* compress events notes
* Destroy fixture database after each gabbi TestSuite
* Fix unittests for supporting py-pgsql env
* Adding links API and CLI query examples
* correct column types in events
* Be explicit about using /tmp for temporary datafiles
* Patch for fixing hardware.memory.used metric
* Add ceph object storage meters
* [PostgreSQL] Fix regexp operator
* Add clean_exit for py-pgsql unit tests
* modify events sql schema to reduce empty columns
* Remove duplicated resource when pollster is polling
* check metering_connection attribute by default
* unicode error in event converter
* cleanup measurements page
* api: add missing combination_rule field in sample
* Fix test case of self-disabled pollster
* update event architecture diagram
* use configured max_retries and retry_interval for database connection
* Updated from global requirements
* Making utilization the default spelling
* Add Disk Meters for ceilometer
* correctly leave group when process is stopped
* Updated from global requirements
* enable oslo namespace check for ceilometer project
* Add doc for version list API
* Enabling self-disabled pollster
* Use werkzeug to run the development API server
* Imported Translations from Transifex
* switch to oslo_serialization
* move non-essential libs to test-requirements
* Validate default values in config
* fix the value of query_spec.maxSample to avoid it being zero
* clean up to use common service code
* Add more sql test scenarios
* [SQLalchemy] Add regex to complex queries
* Fix duplication in sinks names
* metering data ttl sql backend breaks resource metadata
* Refactor unit test code for disk pollsters
* start recording error notifications
* Remove no_resource hack for IPMI pollster
* Add local node resource for IPMI pollsters
* Use stevedore to load alarm rules api
* [MongoDB] Add regex to complex queries
* Imported Translations from Transifex
* support time to live on event database for MongoDB

2015.1.0b2
----------

* split api.controllers.v2
* add elasticsearch events db
* use debug value for pecan_debug default
* Shuffle agents to send request
* Updated from global requirements
* Adds disk iops metrics implementation in Hyper-V Inspector
* discovery: allow to discover all endpoints
* Declarative HTTP testing for the Ceilometer API
* add listener to pick up notification from ceilometermiddleware
* Drop deprecated namespace for oslo.rootwrap
* remove empty module tests.collector
* Add disk latency metrics implementation in Hyper-V Inspector
* add event listener to collector
* add notifier publisher for events
* enable event pipeline
* Imported Translations from Transifex
* deprecate swift middleware
* sync oslo and bring in versionutils
* Expose alarm severity in Alarm Model
* Hyper-V: Adds memory metrics implementation
* Remove mox from requirements
* Fix IPMI unit test to cover different platforms
* adjust import group order in db2 ut code
* add event pipeline
* remove nonexistent module from doc/source/conf.py
* Upgrade to hacking 0.10
* Remove the Nova notifier
* Remove argparse from requirements
* [MongoDB] Improves get_meter_statistics method
* Fix docs repeating measuring units
* [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field
* remove pytidylib and netifaces from tox.ini external dependency
* Avoid unnecessary API dependency on tooz & ceilometerclient
* Correct name of "ipmi" options group
* Fix Opencontrail pollster according to the API changes
* enable tests.storage.test_impl_mongodb
* Remove lockfile from requirements
* Disable eventlet monkey-patching of DNS
* Expose vm's metadata to metrics
* Adding build folders & sorting gitignore
* Disable proxy in unit test case of test_bin
* Add Event and Trait API to document
* Refactor ipmi agent manager
* Use alarm's evaluation periods in sufficient test
* Use oslo_config instead of deprecated oslo.config
* Avoid executing ipmitool in IPMI unit test
* Updated from global requirements
* Add a direct to database publisher
* Fixed MagnetoDB metrics title
* Imported Translations from Transifex
* Fix incorrect test case name in test_net.py
* Updated from global requirements
* notification agent missing CONF option
* switch to oslo_i18n
* Use right function to create extension list for agent test
* Imported Translations from Transifex
* Add an exchange for Zaqar in profiler notification plugin
* Remove unused pecan configuration options
* Updated from global requirements
* Use oslo_utils instead of deprecated oslo.utils
* Match the meter names for network services
* stop using private timeutils attribute
* Update measurement docs for network services
* Catch exception when evaluating single alarm
* Return a meaningful value or raise an exception for libvirt
* Imported Translations from Transifex
* make transformers optional in pipeline
* Added metering for magnetodb
* Add release notes URL for Juno
* Fix release notes URL for Icehouse
* remove unnecessary str method when logging messages
* Revert "Remove Sphinx from py33 requirements"
* untie pipeline manager from samples
* reset listeners on agent refresh
* Remove inspect_instances method from virt
* Optimize resource list query
* Synchronize Python 3 requirements
* Remove unnecessary import_opt|group
* Add test data generator via oslo messaging
* Check to skip to poll and publish when no resource
* Add oslo.concurrency module to tox --env genconfig
* add glance events
* add cinder events
* Manual update from global requirements
* Add cmd.polling.CLI_OPTS to option list
* Ignore ceilometer.conf
* Switch to oslo.context library

2015.1.0b1
----------

* Revert "Skip to poll and publish when no resources found"
* Added missing measurements and corrected errors in doc
* Remove Sphinx from py33 requirements
* Clean up bin directory
* Improve tools/make_test_data.sh correctness
* ensure unique pipeline names
* implement notification coordination
* Make methods static where possible (except openstack.common)
* Fix docs to suit merged compute/central agents concept
* Drop anyjson
* Move central agent code to the polling agent module
* RBAC Support for Ceilometer API Implementation
* [SQLalchemy] Add groupby ability resource_metadata
* Improve links in config docs
* Make LBaaS total_connections cumulative
* remove useless looping in pipeline
* Encompassing one source pollsters with common context
* Modify tests to support ordering of wsme types
* Make compute discovery pollster-based, not agent-level
* Add docs about volume/snapshot measurements
* Port to graduated library oslo.i18n
* Retry to connect database when DB2 or mongodb is restarted
* Updated from global requirements
* Standardize timestamp fields of ceilometer API
* Workflow documentation is now in infra-manual
* Add alarm_name field to alarm notification
* Updated from global requirements
* Rely on VM UUID to fetch metrics in libvirt
* Imported Translations from Transifex
* Initializing a longer resource id in DB2 nosql backend
* Sync oslo-incubator code to latest
* ensure unique list of consumers created
* fix import oslo.concurrency issue
* Add some rally scenarios
* Do not print snmpd password in logs
* Miniscule typo in metering_connection help string
* add http dispatcher
* [MongoDB] Add groupby ability on resource_metadata
* [MongoDB] Fix bug with 'bad' chars in metadatas keys
* Override retry_interval in MongoAutoReconnectTest
* Exclude tools/lintstack.head.py for pep8 check
* Add encoding of rows and qualifiers in impl_hbase
* Database.max_retries only override on sqlalchemy side
* Support to capture network services notifications
* Internal error with period overflow
* Remove Python 2.6 classifier
* Enable pep8 on ./tools directory
* Imported Translations from Transifex
* Fixes Hyper-V Inspector disk metrics cache issue
* fix swift middleware parsing
* Fix order of arguments in assertEqual
* Updated from global requirements
* Adapting pylint runner to the new message format
* Validate AdvEnum & return an InvalidInput on error
* add sahara and heat events
* add keystone events to definitions
* Add timeout to all http requests
* [MongoDB] Refactor time to live feature
* transform samples only when transformers exist
* Updated from global requirements
* Remove module not really used by Ceilometer
* Switch to oslo.concurrency
* Skip to poll and publish when no resources found
* Change event type for identity trust notifications
* Add mysql and postgresql in tox for debug env
* Add new notifications types for volumes/snapshots
* Add encoding to keys in compute_signature
* Tests for system and network aggregate pollsters
* Add bandwidth to measurements
* Fix wrong example of capabilities
* Correct the mongodb_replica_set option's description
* Alarms listing based on "timestamp"
* Use 'pg_ctl' utility to start and stop database
* Correct alarm timestamp field in unittest code
* Refactor kwapi unit test
* Remove duplicated config doc
* VMware: Enable VMware inspector to support any port
* Clean event method definition in meter storage base
* Fix some nits or typos found by chance
* Add Sample ReST API path in webapi document
* Enable filter alarms by their type
* Fix storage.hbase.util.prepare_key() for 32-bit systems
* Add event storage for test_hbase_table_utils
* Add per device rate metrics for instances
* Fix hacking rule H305 imports not grouped correctly
* Add __repr__ method for sample.Sample
* remove ordereddict requirement
* Improve manual.rst file
* Imported Translations from Transifex
* Fix columns migrating for PostgreSQL
* Updated from global requirements
* Updated from global requirements
* [MongoDB] Fix bug with reconnection to new master node
* Updated from global requirements
* support request-id
* Update coverage job to reference correct file
* remove reference to model in migration
* Use oslo_debug_helper and remove our own version
* Allow collector service database connection retry
* refresh ceilometer architecture documentation
* Edits assert methods
* Adds memory stats meter to libvirt inspector
* Edits assert methods
* Edits assert methods
* Edits assert methods
* Edits assert method
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* add script to generate test event data
* Handle poorly formed individual sensor readings
* refactor hbase storage code
* Avoid clobbering existing class definition
* Hoist duplicated AlarmService initialization to super
* Clarify deprecation comment to be accurate
* Work toward Python 3.4 support and testing

2014.2
------

* Fix recording failure for system pollster
* sync and clean up oslo
* Add missing notification options to the documentation
* Add missing alarm options to the documentation
* Add oslo.db to config generator
* Add missed control exchange options to the documentation
* Add coordination related options to the documentation
* Add missing collector options to the documentation
* switch to oslo-config-generator
* Edit docs for docs.opentack.org/developer/
* Add oslo.db to config generator
* Fix signature validation failure when using qpid message queue
* clean capabilities
* move db2 and mongo driver to event tree
* move sql event driver to event tree
* move hbase event driver to event tree
* Sets default encoding for PostgreSQL testing
* update database dispatcher to use events db
* Add role assignment notifications for identity
* add mailmap to avoid dup of authors
* Add user_metadata to network samples
* Fix recording failure for system pollster

2014.2.rc2
----------

* Manually updated translations
* Updated from global requirements
* Creates one database per sql test
* Adds pylint check for critical error in new patches
* Fix neutron client to catch 404 exceptions
* Fix OrderedDict usage for Python 2.6
* Include a 'node' key and value in ipmi metadata
* clean path in swift middleware
* Implement redesigned separator in names of columns in HBase
* [HBase] Add migration script for new row separate design
* Imported Translations from Transifex
* Include a 'node' key and value in ipmi metadata
* Updated from global requirements
* Run unit tests against PostgreSQL
* create skeleton files for event storage backends
* Imported Translations from Transifex
* isolate event storage models
* Fix neutron client to catch 404 exceptions
* Run unit tests against MySQL
* Updated from global requirements
* Correct JSON-based query examples in documentation
* Open Kilo development
* Add cfg.CONF.import_group for service_credentials
* Fix OrderedDict usage for Python 2.6
* clean path in swift middleware

2014.2.rc1
----------

* Partition static resources defined in pipeline.yaml
* Per-source separation of static resources & discovery
* dbsync: Acknowledge 'metering_connection' option
* Fix bug in the documentation
* Use oslo.msg retry API in rpc publisher
* Describe API versions
* Change compute agent recurring logs from INFO to DEBUG
* Fix bug with wrong bool opt value interpolation
* [HBase] Improves speed of unit tests on real HBase backend
* Imported Translations from Transifex
* Removed unused abc meta class
* update references to auth_token middleware
* clean up swift middleware to avoid unicode errors
* [HBase] Catch AlreadyExists error in Connection upgrade
* Use None instead of mutables in method params default values
* Updated from global requirements
* Enable getting service types from configuration file
* test db2 driver code
* Docs: Add description of pipeline discovery section
* Typo "possibilites" should be "possibilities"
* Modified docs to update DevStack's config filename
* Add an API configuration section to docs
* Tune up mod_wsgi settings in example configuration
* Allow pecan debug middleware to be turned off
* Provide __repr__ for SampleFilter
* Eliminate unnecessary search for test cases
* Switch to a custom NotImplementedError
* minimise ceilometer memory usage
* Partition swift pollster resources by tenant
* Add IPMI pollster
* Add IPMI support
* Stop using intersphinx
* Use central agent manager's keystone token in discoveries
* Handle invalid JSON filters from the input gracefully
* Sync jsonutils for namedtuple_as_object fix
* ceilometer spamming syslog
* Timestamp bounds need not be tight (per ceilometer 1288372)
* Allow to pass dict from resource discovery
* fix network discovery meters
* switch to sqlalchemy core
* Imported Translations from Transifex
* Improve the timestamp validation of ceilometer API
* Update docs with Sahara notifications configuration
* Migrate the rest of the central agent pollsters to use discoveries
* Add documentation for implemented identity meters
* Fix tests with testtools>=0.9.39
* Document the standard for PaaS service notifications
* Returns 401 when unauthorized project access occurs
* Adding another set of hardware metrics
* normalise resource data

2014.2.b3
---------

* warn against sorting requirements
* Add validation of alarm_actions schema in alarm API
* Fix help strings
* Imported Translations from Transifex
* Switch partitioned alarm evaluation to a hash-based approach
* Central agent work-load partitioning
* collector: Allows to requeue a sample
* Typo fixed
* Switch to oslo.serialization
* Document pipeline publishers configuration
* Alarm: Use stevedore to load the service class
* Enhance compute diskio tests to handle multiple instances
* Adding comparison operators in query for event traits
* XenAPI support: Update measurements documentation
* update requirements
* add documentation for setting up api pipeline
* Permit usage of notifications for metering
* XenAPI support: Disk rates
* XenAPI support: Changes for networking metrics
* XenAPI support: Memory Usage
* XenAPI support: Changes for cpu_util
* XenAPI support: List the instances
* Rebase hardware pollsters to use new inspector interface
* Switch to use oslo.db
* Remove oslo middleware
* Adding quotas on alarms
* Add an exchange for Trove in profiler notification plugin
* Simplify chained comparisons
* In-code comments should start with `#`, not with `"""`
* Remove redundant parentheses
* skip polls if service is not registered
* re-add hashseed to avoid gate error
* Switch to oslo.utils
* Switch to oslotest
* Handle sqlalchemy connection strings with drivers
* Rewrite list creation as a list literal
* Rewrite dictionary creation as a dictionary literal
* Triple double-quoted strings should be used for docstrings
* Add upgrading alarm storage in dbsync
* Improve configuration.rst
* Fix typos in transformer docstrings
* Update tox.ini pep8 config to ignore i18n functions
* Added new hardware inspector interface
* compute: fix wrong test assertion
* sync oslo-incubator code
* VMware: Support secret host_password option
* refactor filter code in sql backend
* Support for per disk volume measurements
* Use a FakeRequest object to test middleware
* Imported Translations from Transifex
* Improve api_paste_config file searching
* [Hbase] Add column for source filter in _get_meter_samples
* Issue one SQL statement per execute() call
* Allow tests to run outside tox
* [HBase] Refactor hbase.utils
* Set page size when Glance API request is called
* Adding init into tools folder
* Enhancing the make_test_data script
* correct DB2 installation supported features documentation
* Avoid duplication of discovery for multi-sink sources
* Improve performance of libvirt inspector requests
* Documented Stevedore usage and source details
* Add notifications for identity authenticate events
* Add message translate module in vmware inspector
* Handle Cinder attach and detach notifications
* [HBase] Improve uniqueness for row in meter table
* Doc enhancement for API service deployment with mod_wsgi
* Update documentation for new transformer
* Add the arithmetic transformer endpoint to setup.cfg
* Imported Translations from Transifex
* Fix unit for vpn connection metric
* Debug env for tox
* Change spelling mistakes
* Use auth_token from keystonemiddleware
* Fix dict and set order related issues in tests
* Fix listener for update.start notifications
* Sahara integration with Ceilometer
* Add notifications for identity CRUD events
* Extracting make_resource_metadata method
* Fix make_test_data tools script
* Add cumulative and gauge to aggregator transformer
* Enable some tests against py33
* Remove --tmpdir from mktemp
* Replace dict.iteritems() with six.iteritems(dict)
* Replace iterator.next() with next(iterator)
* Fix aggregator flush method
* Automatic discovery of TripleO Overcloud hardware
* Set python hash seed to 0 in tox.ini
* Don't override the original notification message
* Remove ConnectionProxy temporary class
* Move sqlalchemy alarms driver code to alarm tree
* basestring replaced with six.string_types
* Correct misspelled words

2014.2.b2
---------

* Add retry function for alarm REST notifier
* Move hbase alarms driver code to alarm tree
* Update measurement docs for FWaaS
* Update measurement docs for VPNaaS
* Follow up fixes to network services pollsters
* Updated from global requirements
* Implement consuming ipmi notifications from Ironic
* Support for metering FWaaS
* Adds Content-Type to alarm REST notifier
* Multi meter arithmetic transformer
* Remove redundant space in doc string
* Use None instead of mutables in test method params defaults
* Add support for metering VPNaaS
* Use resource discovery for Network Services
* Change get_events and get_traits methods in MongoDB and HBase
* Fix two out-dated links in doc
* Move log alarms driver code to alarm tree
* Separate the console scripts
* clean up event model
* improve expirer performance for sql backend
* Move mongodb/db2 alarms driver code to alarm tree
* Allow to have different DB for alarm and metering
* Replace datetime of time_constraints by aware object
* Sync oslo log module and its dependencies
* Use hmac.compare_digest to compare signature
* Add testcase for multiple discovery-driven sources
* Fixes aggregator transformer timestamp and user input handling
* Improves pipeline transformer documentation
* Fix incorrect use of timestamp in test
* Add keystone control exchange
* Fix call to meter-list in measurements doc
* Remove redundant parentheses
* [Mongodb] Implement events on Mongodb and DB2
* Fix typos in code comments & docstrings
* Make the error message of alarm-not-found clear
* Fix SQL exception getting statistics with metaquery
* Remove docutils pin
* update default_log_levels set by ceilometer
* Fix annoying typo in partition coordinator test
* Transform sample_cnt type to int
* Remove useless sources.json
* Fix H405 violations and re-enable gating
* Fix H904 violations and re-enable gating
* Fix H307 violations and re-enable gating
* Fix the section name in CONTRIBUTING.rst
* Added osprofiler notifications plugin
* Improve performance of Ceilometer a bit
* Revert "Align to openstack python package index mirror"
* Fix aggregator _get_unique_key method
* Remove meter hardware.network.bandwidth.bytes
* Fix F402 violations and re-enable gating
* Fix E265 violations and re-enable gating
* Fix E251 violations and re-enable gating
* Fix E128 violations and re-enable gating
* Fix E126,H104 violations and re-enable gating
* Bump hacking to 0.9.x
* Fixed various import issues exposed by unittest
* use urlparse from six
* clean up sample index
* Fix HBase available capabilities list
* Updated from global requirements
* VMware: Update the ceilometer doc with VMware opts
* Handle non-ascii character in meter name
* Add log output of "x-openstack-request-id" from nova
* Imported Translations from Transifex
* fix StringIO errors in unit test
* Fix hacking rule 302 and enable it
* Imported Translations from Transifex
* sync oslo code
* Fixes ceilometer-compute service start failure
* Reenables the testr per test timeout
* Avoid reading real config files in unit test
* Clean up oslo.middleware.{audit,notifier}
* Use hacking from test-requirements
* Splits hbase storage code base
* Splits mongo storage code base
* Separate alarm storage models from other models
* Iterates swift response earlier to get the correct status
* Fix messaging.get_transport caching
* Fix method mocked in a test
* Don't keep a single global TRANSPORT object
* Clean up .gitignore
* Fix Sphinx directive name in session.py
* Fix list of modules not included in auto-gen docs
* Downgrade publisher logging to debug level again

2014.2.b1
---------

* remove default=None for config options
* [HBase] get_resource optimization
* Fix incorrect trait initialization
* Remove unused logging in tests
* Revert "Fix the floatingip pollster"
* Remove low-value logging from publication codepath
* Fix LBaaS connection meter docs
* Fix the meter type for LB Bytes
* Adding alarm list filtering by state and meter
* Adds caches for image and flavor in compute agent
* [HBase] Implement events on HBase
* Skipping central agent pollster when keystone not available
* Respect $TMPDIR environment variable to run tests
* Fixed unit test TestRealNotification
* Update Measurement Docs for LBaaS
* Metering LoadBalancer as a Service
* Removes per test testr timeout
* Change pipeline_manager to instance attribute in hooks
* Change usage of limit argument in get_sample
* Refactor tests to remove direct access to test DBManagers
* Fix notification for NotImplemented record_events
* Add missing explicit cfg option import
* Fix ceilometer.alarm.notifier.trust import
* Use TYPE_GAUGE rather than TYPE_CUMULATIVE
* Update doc for sample config file issue
* Corrects a flaw in the treatment of swift endpoints
* use LOG instead of logger as name for the Logger object
* Fix doc gate job false success
* Improve performance of api requests with hbase scan
* Add new 'storage': {'production_ready': True} capability
* Clean tox.ini
* Remove (c) and remove unnecessary encoding lines
* Fix testing gate due to new keystoneclient release
* Ignore the generated file ceilometer.conf.sample
* Update the copyright date in doc
* Updated from global requirements
* reconnect to mongodb on connection failure
* refactor sql backend to improve write speed
* Don't rely on oslomsg configuration options
* replaced unicode() with six.text_type()
* Synced jsonutils from oslo-incubator
* Fix the floatingip pollster
* Fix project authorization check
* Update testrepository configuration
* Implemented metering for Cinder's snapshots
* Use joins instead of subqueries for metadata filtering
* Use None instead of mutables in method params defaults
* Remove all mostly untranslated PO files
* switch SplitResult to use six
* Remove unused db code due to api v1 drop
* Updated from global requirements
* oslo.messaging context must be a dict
* Drop deprecated api v1
* Fix network notifications of neutron bulk creation
* mongo: remove _id in inserted alarm changes
* Clean up openstack-common.conf
* Revert "oslo.messaging context must be a dict"
* Correct class when stopping partitioned alarm eval svc
* oslo.messaging context must be a dict
* Corrections of spelling, rephrasing for clarity
* Adapt failing tests for latest wsme version
* Removed StorageEngine class and its hierarchy
* Correcting formatting and adding period in measurement doc
* Initialize dispatcher manager in event endpoint
* Replaced CONF object with url in storage engine creation
* Synced jsonutils from oslo-incubator
* Remove gettextutils._ imports where they are not used
* Remove "# noqa" leftovers for gettextutils._
* transformer: Add aggregator transformer
* Remove conversion debug message
* Fix the return of statistics when getting no samples
* Remove eventlet.sleep(0) in collector tests
* Don't allow queries with 'IN' predicate with an empty sequence
* Check if samples returned by get_sample_data are not None
* Opencontrail network statistics driver
* Add an alarm notification using trusts
* Replace hard-coded WSGI application creation
* Describe storage backends in the collector installation guide
* Made get_capabilities a classmethod instead of object method
* Disable reverse dns lookup
* Consume notif. from multiple message bus
* Use NotificationPlugin as an oslo.msg endpoint
* Improve combination rule validation
* Remove ceilometer.conf.sample
* Use known protocol scheme in keystone tests
* cleanup virt pollster code
* Add encoding argument to deserialising udp packets in collector
* Made get_engine method module-private
* Make entities (Resource, User, Project) able to store lists
* Remove duplicate alarm from alarm_ids
* More accurate meter name and unit for host load averages
* Replace oslo.rpc by oslo.messaging
* Fix a response header bug in the error middleware
* Remove unnecessary escape character in string format
* Optimize checks to set image properties in metadata
* fix statistics query in postgres
* Removed useless code from __init__ method
* Refactored fake connection URL classes
* Replace assert statements with assert methods
* Removes direct access of timeutils.override_time
* Disable specifying alarm itself in combination rule
* Include instance state in metadata
* Allowed nested resource metadata in POST'd samples
* Sync oslo-incubator code
* Updated from global requirements
* Refactor the DB implementation of Capabilities API
* Fix Jenkins translation jobs
* Align to openstack python package index mirror
* Use a more accurate max_delay for reconnects
* Open Juno development

2014.1.rc1
----------

* Imported Translations from Transifex
* Add note on aggregate duplication to API docco
* Use ConnectionPool instead of one Connection in HBase
* remove dump tables from previous migrations
* De-dupe selectable aggregate list in statistics API
* ensure dispatcher service is configured before rpc
* improve performance of resource-list in sql
* SSL errors thrown with Postgres on multi workers
* Remove escape character in string format
* Verify user/project ID for alarm created by non-admin user
* enable a single worker by default
* Fix ceilometer.conf.sample mismatch
* Metadata in compute.instance.exists fix
* Fix order of arguments in assertEquals
* Documenting hypervisor support for nova meters
* Ensure idempotency of cardinality reduction in mongo
* VMware vSphere: Improve the accuracy of queried samples
* Use swob instead of webob in swift unit tests
* Disable oslo.messaging debug logs
* Fix validation error for invalid field name in simple query
* fix create_or_update logic to avoid rollbacks
* Avoid swallowing AssertionError in test skipping logic
* Fix hardware pollster to inspect multiple resources
* spawn multiple workers in services
* Install global lazy _()
* Fixes Hyper-V metrics units
* Ensure intended indices on project_id are created for mongo
* Fix the type of the disk IO rate measurements
* Change the sample_type from tuple to string
* Fix order of arguments in assertEquals
* Ensure alarm rule conforms to alarm type
* insecure flag added to novaclient
* Fixes duplicated names in alarm time constraints
* Use the list when getting information from libvirt
* Eventlet monkeypatch must be done before anything
* 028 migration script incorrectly skips over section
* Fix bug in get_capabilities behavior in DB drivers
* Added documentation for selectable aggregates
* Make sure to use IPv6 sockets for ceilometer in IPv6 environment
* VMware vSphere: Bug fixes
* Ensure insecure config option propagated by alarm evaluator
* Fix order of arguments in assertEquals
* Fix order of arguments in assertEquals
* Fix order of arguments in assertEquals
* Rationalize get_resources for mongodb
* Ensure insecure config option propagated by alarm service
* add host meters to doc
* Add field translation to complex query from OldSample to Sample
* Extend test case to cover old alarm style conversion
* Updated doc with debug instructions
* Refactored the way testscenarios tests are run
* Corrected the sample names in hardware pollsters
* Prevent alarm_id in query field of getting history
* Make ceilometer work with sqla 0.9.x
* Implements monitoring-network-from-opendaylight
* Add user-supplied arguments in log_handler
* VMware vSphere support: Disk rates
* Fix updating alarm can specify existing alarm name
* Changes for networking metrics support for vSphere
* VMware vSphere: Changes for cpu_util
* VMware vSphere support: Memory Usage
* Fix broken statistics in sqlalchemy
* Fixes Hyper-V Inspector network metrics values
* Set storage engine for the trait_type table
* Enable monkeypatch for select module
* Rename id to alarm_id of Alarm in SqlAlchemy
* Fix some spelling mistakes and an incorrect url
* Skip central agent interval_task when keystone fails

2014.1.b3
---------

* Ensure user metadata mapped for instance notifications
* Per pipeline pluggable resource discovery
* Wider selection of aggregates for sqlalchemy
* Wider selection of aggregates for mongodb
* Adds time constraints to alarms
* Remove code duplication Part 3
* Decouple source and sink configuration for pipelines
* Selectable aggregate support in mongodb
* Selectable aggregation functions for statistics
* Add simple capabilities API
* Removed global state modification by api test
* VMware vSphere support: Performance Mgr APIs
* Fix typo
* move databases to test requirements
* Make recording and scanning data more deterministic
* Implements "not" operator for complex query
* Implements metadata query for complex query feature
* Alarms support in HBase Part 2
* Alarm support in HBase Part 1
* Remove unused variable
* Added hardware pollsters for the central agent
* Added hardware agent's inspector and snmp implementation
* Updated from global requirements
* Pluggable resource discovery for agents
* Remove code duplication Part 2
* Imported Translations from Transifex
* remove audit logging on flush
* Tolerate absent recorded_at on older mongo/db2 samples
* api: export recorded_at in returned samples
* Fix the way metadata is stored in HBase
* Set default log level of iso8601 to WARN
* Sync latest config file generator from oslo-incubator
* Fix typo on testing doc page
* Remove code duplication
* sample table contains redundant/duplicate data
* rename meter table to sample
* storage: store recording timestamp
* Fixed spelling error in Ceilometer
* Adds doc string to query validate functions in V2 API
* Updated from global requirements
* Remove code that works around a (now-resolved) bug in pecan
* Fix missing source field content on /v2/samples API
* Refactor timestamp existence validation in V2 API
* Use the module units to refer to bytes type
* sync units.py from oslo to ceilometer
* Add comments for _build_paginate_query
* Implements monitoring-network
* Handle Heat notifications for stack CRUD
* Alembic migrations not tested
* Modify the description of combination alarm
* check domain state before inspecting nics/disks
* Adds gettextutils module in converter
* Keep py3.X compatibility for urllib.urlencode
* Added missing import
* Removed useless prints that pollute tests log
* Implements in operator for complex query functionality
* Implements field validation for complex query functionality
* allow hacking to set dependencies
* Implements complex query functionality for alarm history
* Implements complex query functionality for alarms
* Remove None for dict.get()
* Replace assertEqual(None, *) with assertIsNone in tests
* Update notification_driver
* Switch over to oslosphinx
* Fix some flaws in ceilometer docstrings
* Rename Openstack to OpenStack
* Remove start index 0 in range()
* Updated from global requirements
* Remove blank line in docstring
* Use six.moves.urllib.parse instead of urlparse
* Propagate cacert and insecure flags to glanceclient
* Test case for creating an alarm without auth headers
* Refactored run-tests script
* Implements complex query functionality for samples
* fix column name and alignment
* Remove tox locale overrides
* Updated from global requirements
* Adds flavor_id in the nova_notifier
* Improve help strings
* service: re-enable eventlet just for sockets
* Fixes invalid key in Neutron notifications
* Replace BoundedInt with WSME's IntegerType
* Replace 'Ceilometer' by 'Telemetry' in the generated doc
* Doc: Add OldSample to v2.rst
* Fixing some simple documentation typos
* Updated from global requirements
* Fix for a simple typo
* Replace 'a alarm' by 'an alarm'
* Move ceilometer-send-counter to a console script
* sync oslo common code
* Handle engine creation inside of Connection object
* Adds additional details to alarm notifications
* Fix formatting of compute-nova measurements table
* Fix string-to-boolean casting in queries
* nova notifier: disable tests + update sample conf
* Update oslo
* Refactored session access
* Fix the py27 failure because of "ephemeral_key_uuid" error
* Correct a misuse of RestController in the Event API
* Fix docs on what an instance meter represents
* Fix measurement docs to correctly represent Existence meters
* samples: fix test case status code check
* Replace non-ascii symbols in docs
* Use swift master
* Add table prefix for unit tests with hbase
* Add documentation for pipeline configuration
* Remove unnecessary code from alarm test
* Updated from global requirements
* Use stevedore's make_test_instance
* use common code for migrations
* Use explicit http error code for api v2
* Clean .gitignore
* Remove unused db engine variable in api
* Revert "Ensure we are not exhausting the sqlalchemy pool"
* eventlet: stop monkey patching
* Update dev docs to include notification-agent
* Change meter_id to meter_name in generated docs
* Correct spelling of logger for dispatcher.file
* Fix some typos in architecture doc
* Drop foreign key constraints of alarm in sqlalchemy
* Re-enable lazy translation
* Sync gettextutils from Oslo
* Fix wrong doc string for meter type
* Fix recursive_keypairs output
* Added abc.ABCMeta metaclass for abstract classes
* Removes use of timeutils.set_time_override

2014.1.b2
---------

* tests: kill all started processes on exit
* Exclude weak datapoints from alarm threshold evaluation
* Move enable_acl and debug config to ceilometer.conf
* Fix the Alarm documentation of Web API V2
* StringIO compatibility for python3
* Set the SQL Float precision
* Convert alarm timestamp to PrecisionTimestamp
* use six.moves.xrange to replace xrange
* Exit expirer earlier if db-ttl is disabled
* Added resources support in pollster's interface
* Improve consistency of help strings
* assertTrue(isinstance) replaced by assertIsInstance
* Return trait type from Event api
* Add new rate-based disk and network pipelines
* Name and unit mapping for rate_of_change transformer
* Update oslo
* Remove dependencies on pep8, pyflakes and flake8
* Implement the /v2/samples/ API
* Fix to handle null threshold_rule values
* Use DEFAULT section for dispatcher in doc
* Insertion in HBase should be fixed
* Trivial typo
* Update ceilometer.conf.sample
* Use the fact that empty sequences are false
* Remove unused imports
* Replace mongo aggregation with plain ol' map-reduce
* Remove redundant meter (name,type,unit) tuples from Resource model
* Fix work of udp publisher
* tests: pass /dev/null as config for mongod
* requirements: drop netaddr
* tests: allow to skip if no database URL
* Fix to tackle instances without an image assigned
* Check for pep8 E226 and E24
* Fixed spelling mistake
* AlarmChange definition added to doc/source/webapi/v2.rst
* 1st & last sample timestamps in Resource representation
* Avoid false negatives on message signature comparison
* cacert is not picked up correctly by alarm services
* Change endpoint_type parameter
* Utilizes assertIsNone and assertIsNotNone
* Add missing gettextutils import to ceilometer.storage.base
* Remove redundant code in nova_client.Client
* Allow customized reseller_prefix in Ceilometer middleware for Swift
* Fix broken i18n support
* Empty files should no longer contain copyright
* Add Event API
* Ensure we are not exhausting the sqlalchemy pool
* Add new meters for swift
* Sync config generator workaround from oslo
* storage: factorize not implemented methods
* Don't assume alarms are returned in insert order
* Correct env variable in file oslo.config.generator.rc
* Handle the metrics sent by nova notifier
* Add a wadl target to the documentation
* Sync config generator from oslo-incubator
* Convert event timestamp to PrecisionTimestamp
* Add metadata query validation limitation
* Ensure the correct error message is displayed
* Imported Translations from Transifex
* Move sphinxcontrib-httpdomain to test-requirements
* Ensure that the user/project exist on alarm update
* api: raise ClientSideError rather than ValueError
* Implement the /v2/sample API
* service: fix service alive checking
* Oslo sync to recover from db2 server disconnects
* Event Storage Layer
* config: specify a template for mktemp
* test code should be excluded from test coverage summary
* doc: remove note about Nova plugin framework
* doc: fix formatting of alarm action types
* Updated from global requirements
* Add configuration-driven conversion to Events
* add newly added constraints to expire clear_expired_metering_data
* fix unit
* Add import for publisher_rpc option
* add more test cases to improve the test code coverage #5
* Create a shared queue for QPID topic consumers
* Properly reconnect subscribing clients when QPID broker restarts
* Don't need session.flush in context managed by session
* sql migration error in 020_add_metadata_tables

2014.1.b1
---------

* Remove rpc service from agent manager
* Imported Translations from Transifex
* organise requirements files
* Add a Trait Type model and db table
* No module named MySQLdb bug
* Add a note about permissions to ceilometer logging directory
* sync with oslo-incubator
* Rename OpenStack Metering to OpenStack Telemetry
* update docs to adjust for naming change
* Add i18n wrapping for all LOG messages
* Imported Translations from Transifex
* Removed unused method in compute agent manager
* connection is not closed in migration script
* Fixed a bug in sql migration script 020
* Fixed nova notifier test
* Added resources definition in the pipeline
* Change metadata_int's value field to type bigint
* Avoid intermittent integrity error on alarm creation
* Simplify the dispatcher method prototype
* Use map_method from stevedore 0.12
* Remove the collector submodule
* Move dispatcher a level up
* Split collector
* Add a specialized Event Type model and db table
* Remove old sqlalchemy-migrate workaround
* Revert "Support building wheels (PEP-427)"
* full pep8 compliance (part 2)
* Selectively import RPC backend retry config
* Fixes Hyper-V Inspector disk metrics bug
* Imported Translations from Transifex
* full pep8 compliance (part1)
* Replace mox with mock in alarm,central,image tests
* Stop ignoring H506 errors
* Update hacking for real
* Replace mox with mock in tests.collector
* Replace mox with mock in publisher and pipeline
* Replace mox with mock in novaclient and compute
* Remove useless defined Exception in tests
* Support building wheels (PEP-427)
* Fixes Hyper-V Inspector cpu metrics bug
* Replace mox with mock in tests.storage
* Document user-defined metadata for swift samples
* Replace mox with mock in energy and objectstore
* Updated from global requirements
* Replace mox with mock in tests.api.v2
* Refactor API error handling
* make record_metering_data concurrency safe
* Move tests into ceilometer module
* Replace mox with mock in tests.api.v1
* Replace mox with mock in tests.api.v2.test_compute
* Corrected import order
* Use better predicates from testtools instead of plain assert
* Stop using openstack.common.exception
* Replace mox with mock in tests.network
* Replace mox with mocks in test_inspector
* Fix failing nova_tests tests
* Replace mox with mocks in tests.compute.pollsters
* Add an insecure option for Keystone client
* Sync log from oslo
* Cleanup tests.publisher tests
* mongodb, db2: do not print full URL in logs
* Use wsme ClientSideError to handle unicode string
* Use consistent cache key for swift pollster
* Fix the developer documentation of the alarm API
* Fix the default rpc policy value
* Allow Events without traits to be returned
* Replace tests.base part8
* Replace tests.base part7
* Replace tests.base part6
* Imported Translations from Transifex
* Imported Translations from Transifex
* Sync log_handler from Oslo
* Don't use sqlalchemy Metadata as global var
* enable sql metadata query
* Replace tests.base part5
* Replace tests.base part4
* Imported Translations from Transifex
* Updated from global requirements
* Fix doc typo in volume meter description
* Updated from global requirements
* Add source to Resource API object
* compute: virt: Fix Instance creation
* Fix for get_resources with postgresql
* Updated from global requirements
* Add tests when admin set alarm owner to its own
* Replace tests.base part3
* Replace tests.base part2
* Replace tests.base part1
* Fix wrong usage of Metadata in 15,16 migrations
* api: update for WSME 0.5b6 compliance
* Changes FakeMemcache to set token to expire on utcnow + 5 mins
* Change test case get_alarm_history_on_create
* Change alarm_history.detail to text type
* Add support for keystoneclient 0.4.0
* Ceilometer has no such project-list subcommand
* Avoid leaking admin-ness into combination alarms
* Updated from global requirements
* Avoid leaking admin-ness into threshold-oriented alarms
* Update Oslo
* Set python-six minimum version
* Ensure combination alarms can be evaluated
* Ensure combination alarm evaluator can be loaded
* Apply six for metaclass
* add more test cases to improve the test code coverage #6
* Update python-ceilometerclient lower bound to 1.0.6
* Imported Translations from Transifex
* add more test cases to improve the test code coverage #4

2013.2.rc1
----------

* db2 does not allow None as a key for user_id in user collection
* Start Icehouse development
* Imported Translations from Transifex
* Disable lazy translation
* Add notifications for alarm changes
* Updated from global requirements
* api: allow alarm creation for other projects by admins
* assertEquals is deprecated, use assertEqual
* Imported Translations from Transifex
* update alarm service setup in dev doc
* Add bug number of some wsme issue
* api: remove useless comments
* issue an error log when libvirt cannot be imported
* add coverage config file to control module coverage report
* tests: fix rounding issue in timestamp comparison
* api: return 404 if an alarm is not found
* remove locals() for string format
* add more test cases to improve the test code coverage #3
* Remove extraneous vim configuration comments
* Return 401 when action is not authorized
* api: return 404 if a resource is not found
* keystone client changes in AuthProtocol made our test cases fail
* Don't load disabled alarms into alarm evaluators
* Remove MANIFEST.in
* Allow to get a disabled alarm
* Add example with return values in API v2 docs
* Avoid imposing alembic 6.0 requirement on all distros
* tests: fix places check for timestamp equality
* Don't publish samples if resource_id is missing
* Require oslo.config 1.2.0 final
* Don't send useless rpc alarm notification
* service: check that timestamps are almost equal
* Test the response body when deleting an alarm
* Change resource.resource_metadata to text type
* Adding region name to service credentials
* Fail tests early if mongod is not found
* add more test cases to improve the test code coverage #2
* add more test cases to improve the test code coverage #1
* Imported Translations from Transifex
* Replace OpenStack LLC with OpenStack Foundation
* Use built-in print() instead of print statement
* Simple alarm partitioning protocol based on AMQP fanout RPC
* Handle mandatory field manually
* Provide new API endpoint for alarm state
* Implement the combination evaluator
* Add alarm combination API
* Notify with string representation of alarm reason
* Convert BoundedInt value from json into int
* Fix for timestamp precision in SQLAlchemy
* Add source field to Meter model
* Refactor threshold evaluator
* Alarm API update
* Update requirements
* WSME 0.5b5 breaking unit tests
* Fix failed downgrade in migrations
* refactor db2 get_meter_statistics method to support mongodb and db2
* tests: import pipeline config
* Fix a tiny mistake in api doc
* collector-udp: use dispatcher rather than storage
* Imported Translations from Transifex
* Drop sitepackages=False from tox.ini
* Update sphinxcontrib-pecanwsme to 0.3
* Architecture enhancements
* Force MySQL to use InnoDB/utf8
* Update alembic requirement to 0.6.0 version
* Correctly output the sample content in the file publisher
* Pecan assuming meter names are extensions
* Handle inst not found exceptions in pollsters
* Catch exceptions from nova client in poll_and_publish
* doc: fix storage backend features status
* Add timestamp filtering cases in storage tests
* Imported Translations from Transifex
* Use global openstack requirements
* Add group by statistics examples in API v2 docs
* Add docstrings to some methods
* add tests for _query_to_kwargs func
* validate counter_type when posting samples
* Include auth_token middleware in sample config
* Update config generator
* run-tests: fix MongoDB start wait
* Imported Translations from Transifex
* Fix handling of bad paths in Swift middleware
* Drop the *.create.start notification for Neutron
* Make the Swift-related doc more explicit
* Fix to return latest resource metadata
* Update the high level architecture
* Alarm history storage implementation for sqlalchemy
* Improve libvirt vnic parsing with missing mac!
* Handle missing libvirt vnic targets!
* Make type guessing for query args more robust
* add MAINTAINERS file
* nova_notifier: fix tests
* Update openstack.common.policy from oslo-incubator
* Clean-ups related to alarm history patches
* Improved MongoClient pooling to avoid out of connections error
* Disable the pymongo pooling feature for tests
* Fix wrong migrations
* Fixed nova notifier unit test
* Add group by statistics in API v2
* Update to tox 1.6 and setup.py develop
* Add query support to alarm history API
* Reject duplicate events
* Fixes a bug in Kwapi pollster
* alarm api: rename counter_name to meter_name
* Fixes service startup issue on Windows
* Handle volume.resize.* notifications
* Network: process metering reports from Neutron
* Alarm history storage implementation for mongodb
* Fix migration with fkeys
* Fixes two typos in measurements.rst
* Add a fake UUID to Meter on API level
* Append /usr/sbin:/sbin to the path for searching mongodb
* Plug alarm history logic into the API
* Added upper version boundary for six
* db2 distinct call results are different from mongodb call
* Sync rpc from oslo-incubator
* Imported Translations from Transifex
* Add pagination parameter to the database backends of storage
* Base Alarm history persistence model
* Fix empty metadata issue of instance
* alarm: generate alarm_id in API
* Import middleware from Oslo
* Imported Translations from Transifex
* Adds group by statistics for MongoDB driver
* Fix wrong UniqueConstraint name
* Adds else and TODO in statistics storage tests
* Imported Translations from Transifex
* Extra indexes cleanup
* API FunctionalTest class lacks doc strings
* install manual last few sections format needs to be fixed
* api: update v1 for Flask >= 0.10
* Use system locale when Accept-Language header is not provided
* Adds Hyper-V compute inspector
* missing resource in middleware notification
* Support for wildcard in pipeline
* Refactored storage tests to use testscenarios
* doc: replace GitHub by git.openstack.org
* api: allow usage of resource_metadata in query
* Remove useless doc/requirements
* Fixes non-string metadata query issue
* rpc: reduce sleep time
* Move sqlalchemy tests only into test_impl_sqlalchemy
* Raise Error when pagination/groupby is missing
* Raise Error when pagination support is missing
* Use timeutils.utcnow in alarm threshold evaluation
* db2 support
* plugin: remove is_enabled
* Doc: improve doc about Nova measurements
* Storing events via dispatchers
* Imported Translations from Transifex
* ceilometer-agent-compute did not catch exception for disk error
* Change counter to sample in network tests
* Change counter to sample in objectstore tests
* Remove no-longer-used code in test_notifier
* Change counter to sample vocable in cm.transformer
* Change counter to sample vocable in cm.publisher
* Change counter to sample vocable in cm.image
* Change counter to sample vocable in cm.compute
* Change counter to sample vocable in cm.energy
* Use samples vocable in cm.publisher.test
* Change counter to sample vocable in volume tests
* Change counter to sample vocable in api tests
* Add the source=None to from_notification
* Make RPCPublisher flush method threadsafe
* Enhance delayed message translation when _ is imported
* Remove use_greenlets argument to MongoClient
* Enable concurrency on nova notifier tests
* Imported Translations from Transifex
* Close database connection for alembic env
* Fix typo in 17738166b91 migration
* Don't call publisher without sample
* message_id is not allowed to be submitted via api
* Api V2 post sample refactoring
* Add SQLAlchemy implementation of groupby
* Fixes failed notification when deleting instance
* Reinitialize pipeline manager for service restart
* Sync gettextutils from oslo-incubator
* Doc: clearly state that one can filter on metadata
* Add HTTP request/reply samples
* Use new oslo fixture in CM tests
* Imported Translations from Transifex
* Bump hacking to 0.7.0
* Fix the dict type metadata missing issue
* Raise error when period has a negative value
* Imported Translations from Transifex
* Import missing gettext _
* Remove 'counter' occurrences in pipeline
* Remove the mongo auth warning during tests
* Change the error message of resource listing in mongodb
* Change test_post_alarm case in test_alarm_scenarios
* Skeletal alarm history API
* Reorg alarms controller to facilitate history API
* Fix Jenkins failure due to missing _
* Fix nova test_notifier wrt new notifier API
* Remove counter occurrences from documentation
* Updated from global requirements
* Fixes dict metadata query issue of HBase
* s/alarm/alarm_id/ in alarm notification
* Remove unused abstract class definitions
* Removed unused self.counters in storage test class
* Initial alarming documentation
* Include previous state in alarm notification
* Consume notification from the default queue
* Change meter.resource_metadata column type
* Remove MongoDB TTL support for MongoDB < 2.2
* Add first and last sample timestamp
* Use MongoDB aggregate to get resources list
* Fix resources/meters pagination test
* Handle more Nova and Neutron events
* Add support for API message localization
* Add the alarm id to the rest notifier body
* fix alarm notifier tests
* Sync gettextutils from oslo
* Fix generating coverage on MacOSX
* Use the new nova Instance class
* Return message_id in POSTed samples
* rpc: remove source argument from message conversion
* Remove source as a publisher argument
* Add repeat_actions to alarm
* Rename get_counters to get_samples
* Add pagination support for MongoDB
* Doc: measurements: add doc on Cinder/Swift config
* Update nova_client.py
* objectstore: trivial cleanup in _Base
* Add support for CA authentication in Keystone
* add unit attribute to statistics
* Fix notify method signature on LogAlarmNotifier
* Fix transformer's LOG TypeError
* Update openstack.common
* Fixes HBase metadata query returning wrong result
* Fix Hacking 0.6 warnings
* Make middleware.py Python 2.6 compatible
* Call alembic migrations after sqlalchemy-migrate
* Rename ceilometer.counter to ceilometer.sample
* Added separate MongoDB database for each test
* Relax OpenStack upper capping of client versions
* Refactored MongoDB connection pool to use weakrefs
* Centralized backends tests scenarios in one place
* Added tests to verify that local time is correctly handled
* Refactored impl_mongodb to use full connection url
* calling distinct on _id field against a collection is slow
* Use configured endpoint_type everywhere
* Allow use of local conductor
* Update nova configuration doc to use notify_on_state_change
* doc: how to inject user-defined data
* Add documentation on nova user defined metadata
* Refactored API V2 tests to use testscenarios
* Refactored API V1 tests to use testscenarios
* alarm: Per user setting to disable ssl verify
* alarm: Global setting to disable ssl verification
* Imported Translations from Transifex
* Implementation of the alarm RPCAlarmNotifier
* Always init cfg.CONF before running a test
* Sets storage_conn in CollectorService
* Remove replace/preserve logic from rate of change transformer
* storage: remove per-driver options
* hbase: do not register table_prefix as a global option
* mongodb: do not set replica_set as a global option
* Change nose to testr in the documentation
* Fixed timestamp creation in MongoDB mapreduce
* Ensure url is a string for requests.post
* Implement https:// support in REST alarm notification
* Implement dot in matching_metadata key for mongodb
* trailing slash in url causes 404 error
* Fix missing foreign keys
* Add cleanup migration for indexes
* Sync models with migrations
* Avoid dropping cpu_util for multiple instances
* doc: /statistics fields are not queryable (you cannot filter on them)
* fix resource_metadata failure missing image data
* Standardize on X-Project-Id over X-Tenant-Id
* Default to ctx user/project ID in sample POST API
* Multiple dispatcher enablement
* storage: fix clear/upgrade order
* Lose weight for Ceilometer log in verbose mode
* publisher.rpc: queuing policies
* Remove useless mongodb connection pool comment
* Add index for db.meter by descending timestamp
* doc: add a bunch of functional examples for the API
* api: build the storage connection once and for all
* Fix the argument of UnknownArgument exception
* make publisher procedure call configurable
* Disable mongod prealloc, wait for it to start
* Added alembic migrations
* Allow to enable time to live on metering sample
* Implement a basic REST alarm notification
* Imported Translations from Transifex
* Ensure correct return code of run-tests.sh
* File based publisher
* Unset OS_xx variable before generating configuration
* Use run-tests.sh for tox coverage tests
* Emit cpu_util from transformer instead of pollster
* Allow simpler scale exprs in transformer.conversions
* Use a real MongoDB instance to run unit tests
* Allow to specify the endpoint type to use
* Rename README.md to README.rst
* Use correct hostname to get instances
* Provide CPU number as additional metadata
* Remove get_counter_names from the pollster plugins
* Sync SQLAlchemy models with migrations
* Transformer to measure rate of change
* Make sure plugins are named after their meters
* Break up the swift pollsters
* Split up the glance pollsters
* Make visual coding style consistent
* Separate power and energy pollsters
* Break up compute pollsters
* Implement a basic alarm notification service
* Optionally store Events in Collector
* Fix issue with pip installing oslo.config-1.2.0
* Transformer to convert between units
* publisher.rpc: make per counter topic optional
* ceilometer tests need to be enabled/cleaned
* Also accept timeout parameter in FakeMemCache
* Fix MongoDB backward compat wrt units
* Use oslo.sphinx and remove local copy of doc theme
* Reference
setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled_notification_listeners option * Remove disabled_compute_pollsters option * Remove disabled_central_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import_opt rather than import * Move os_* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set 
downloadcache in tox.ini * Imported Translations from Transifex 2013.2.b1 --------- * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract_opts group extraction * Fix the sample name of 'resource_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require_map_reduce mim import * Extend swift middleware to collect number of requests * 
instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get_event_interval * Remove gettext.install from ceilometer/__init__.py * Prepare for future i18n use of _() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get_volume_sum and get_volume_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl_test * api: run max_resource_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME_DELETE notification * Open havana development, bump to 2013.2 2013.1 ------ * Change the column counter_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max_volume tests on SQL backend too * api: run list_sources tests on SQL and Mongo backend * api: run list_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.*.bytes" counter * Updated the description of get_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default_log_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering_storage_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an 
int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval 2013.1.g3 --------- * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join_consumer_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os_* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools_git from setup_requires * Removed unused param for get_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute_manager unavailable * Rename run_tests.sh to wrap_nosetests.sh * Update openstack.common * Corrected get_raw_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth_token middleware changes * policy: fix policy_file finding * Remove the _initialize_config_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and 
revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth_keystone * Import only once in nova_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance_type information to NetPollster * Fix dbsync API change * Fix image_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql_engine option type * Remove nova.flags usage * api: add support for timestamp in _list_resources() * api: add timestamp interval support in _list_events() * tests: simplify api list_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use 
the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu_util meter recording CPU utilization % * Fix TypeError from old-style publish_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get_volume_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 0.1 --- * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make_test_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.*.* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add 
volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get_metadata_from_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record_metering_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}_disk_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. 
  Fixes bug 1018311
* Essex: update Nova to 2012.1.1, add python-novaclient
* Split service preparation, periodic interval configurable
* Use the same instance metadata everywhere
* Emit meter event for instance "exists"
* Start defining DB engine API
* Fallback on nova.rpc for Essex
* Add instance metadata from notification events
* Combined fix to get past broken state of repo
* Add more metadata to instance counter
* Register storage options on import
* Add Essex tests
* log more than ceilometer
* Remove event_type field from meter messages
* fix message signatures for nested dicts
* Remove nova.flags usage
* Copy openstack.common.cfg
* check message signatures in the collector
* Sketch out a plugin system for saving metering data
* refactor meter event publishing code
* Add and use ceilometer own log module
* add counter type field
* Use timestamp instead of datetime when creating Counter
* Use new flag API
* Fix a PEP8 error
* Make the stand-alone test script mimic tox
* Remove unneeded eventlet test requirement
* Add listeners for other instance-related events
* Add tox configuration
* Use openstack.common.cfg for ceilometer options
* Publish and receive metering messages
* Add floating IP pollster
* Fix tests based on DB by importing nova.tests
* make the pollsters in the agent plugins
* Build ceilometer-agent and ceilometer-collector
* Add plugin support to the notification portion of the collector daemon
* Add CPU time fetching
* Add an example function for converting a nova notification to a counter
* add a tool for recording notifications and replaying them
* Add an exception handler to deal with errors that occur when the info in
  nova is out of sync with reality (as on my currently broken system). Also
  adds a nova prefix to the logger for now so messages from this module make
  it into the log file
* Periodically fetch for disk io stats
* Use nova.service, add a manager class
* Change license to Apache 2.0
* Add setup.py
* Import ceilometer-nova-compute
* Ignore pyc files
* Add link to blueprint
* Add .gitreview file
* initial commit
ceilometer-6.0.0/PKG-INFO0000664000567000056710000000207512701406364016112 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1
Name: ceilometer
Version: 6.0.0
Summary: OpenStack Telemetry
Home-page: http://docs.openstack.org/developer/ceilometer/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ceilometer
        ==========

        Release notes can be read online at:
        http://docs.openstack.org/developer/ceilometer/releasenotes/index.html

        Documentation for the project can be found at:
        http://docs.openstack.org/developer/ceilometer/

        The project home is at:
        http://launchpad.net/ceilometer

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Topic :: System :: Monitoring
ceilometer-6.0.0/rally-jobs/0000775000567000056710000000000012701406364017067 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/rally-jobs/ceilometer.yaml0000664000567000056710000000263612701406223022104 0ustar jenkinsjenkins00000000000000---
  CeilometerMeters.list_meters:
    -
      runner:
        type: "constant"
        times: 10
        concurrency: 10
      context:
        users:
          tenants: 1
          users_per_tenant: 1
      sla:
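        # SLA block for the scenario above, as interpreted by Rally's SLA
        # checker: max_failure_percent set to 0 (next line) means a single
        # failed iteration fails the whole run.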
        max_failure_percent: 0

  CeilometerResource.list_resources:
    -
      runner:
        type: "constant"
        times: 10
        concurrency: 10
      context:
        users:
          tenants: 1
          users_per_tenant: 1
      sla:
        max_failure_percent: 0

  CeilometerStats.create_meter_and_get_stats:
    -
      args:
        user_id: "user-id"
        resource_id: "resource-id"
        counter_volume: 1.0
        counter_unit: ""
        counter_type: "cumulative"
      runner:
        type: "constant"
        times: 20
        concurrency: 10
      context:
        users:
          tenants: 1
          users_per_tenant: 1
      sla:
        max_failure_percent: 0

  CeilometerQueries.create_and_query_samples:
    -
      args:
        filter: {"=": {"counter_unit": "instance"}}
        orderby: !!null
        limit: 10
        counter_name: "cpu_util"
        counter_type: "gauge"
        counter_unit: "instance"
        counter_volume: "1.0"
        resource_id: "resource_id"
      runner:
        type: "constant"
        times: 20
        concurrency: 10
      context:
        users:
          tenants: 1
          users_per_tenant: 1
      sla:
        max_failure_percent: 0
ceilometer-6.0.0/rally-jobs/plugins/0000775000567000056710000000000012701406364020550 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/rally-jobs/plugins/plugin_sample.py0000664000567000056710000000171712701406223023761 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Sample of plugin for Ceilometer.

For more Ceilometer related benchmarks take a look here:
github.com/openstack/rally/blob/master/rally/benchmark/scenarios/ceilometer/

About plugins:
https://rally.readthedocs.org/en/latest/plugins.html

Rally concepts:
https://wiki.openstack.org/wiki/Rally/Concepts
"""

from rally.benchmark.scenarios import base


class CeilometerPlugin(base.Scenario):
    pass
ceilometer-6.0.0/rally-jobs/plugins/README.rst0000664000567000056710000000060612701406223022233 0ustar jenkinsjenkins00000000000000Rally plugins
=============

All *.py modules from this directory will be auto-loaded by Rally, and all
plugins will be discoverable. No extra configuration is needed, and there is
no difference between writing plugins here and in the Rally code base.

Note that it is better to push all interesting and useful benchmarks to the
Rally code base, as this simplifies administration for Operators.
ceilometer-6.0.0/rally-jobs/extra/0000775000567000056710000000000012701406364020212 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/rally-jobs/extra/fake.img0000664000567000056710000000000012701406223021576 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/rally-jobs/extra/README.rst0000664000567000056710000000025512701406223021675 0ustar jenkinsjenkins00000000000000Extra files
===========

All files from this directory will be copied to the gates, so you are able
to use absolute paths in rally tasks. Files will be in ~/.rally/extra/*
ceilometer-6.0.0/rally-jobs/README.rst0000664000567000056710000000157012701406223020553 0ustar jenkinsjenkins00000000000000Rally job related files
=======================

This directory contains rally tasks and plugins that are run by OpenStack CI.

Structure
---------

* plugins - directory where you can add rally plugins. Almost everything in
  Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks,
  generic cleanup resources, ....
* extra - all files from this directory will be copied to the gates, so you
  are able to use absolute paths in rally tasks. Files will be located in
  ~/.rally/extra/*
* ceilometer.yaml - a task that is run in the gates against Ceilometer

Useful links
------------

* More about Rally: https://rally.readthedocs.org/en/latest/
* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
ceilometer-6.0.0/tox.ini0000664000567000056710000001035312701406223016320 0ustar jenkinsjenkins00000000000000[tox]
minversion = 1.6
skipsdist = True
envlist = py27,py34,functional,py34-functional,pep8

[testenv]
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
install_command = pip install -U {opts} {packages}
usedevelop = True
setenv = VIRTUAL_ENV={envdir}
         OS_TEST_PATH=ceilometer/tests/unit
passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE
commands =
   {toxinidir}/tools/pretty_tox.sh "{posargs}"
   oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf
whitelist_externals = bash

# TODO(ityaptin): With the separation of tests into unit and functional
# folders, we need to set the environment variable
# OS_TEST_PATH=./ceilometer/tests/functional in the "py-" jobs.

[testenv:py-mongodb]
setenv = OS_TEST_PATH=ceilometer/tests/functional/
commands = overtest mongodb {toxinidir}/tools/pretty_tox.sh "{posargs}"

[testenv:py-mysql]
setenv = OS_TEST_PATH=ceilometer/tests/functional/
commands = overtest mysql {toxinidir}/tools/pretty_tox.sh "{posargs}"

[testenv:py-pgsql]
setenv = OS_TEST_PATH=ceilometer/tests/functional/
commands = overtest postgresql {toxinidir}/tools/pretty_tox.sh "{posargs}"

# Functional tests for Elasticsearch
[testenv:py-elastic]
setenv = OS_TEST_PATH=ceilometer/tests/functional/
commands = overtest elasticsearch {toxinidir}/tools/pretty_tox.sh "{posargs}"

[testenv:functional]
setenv = VIRTUAL_ENV={envdir}
         OS_TEST_PATH=ceilometer/tests/functional/
passenv = CEILOMETER_*
commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}"

[testenv:py34-functional]
setenv = VIRTUAL_ENV={envdir}
         OS_TEST_PATH=ceilometer/tests/functional/
basepython = python3.4
passenv = CEILOMETER_*
commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}"

[testenv:integration]
setenv = VIRTUAL_ENV={envdir}
         OS_TEST_PATH=./ceilometer/tests/integration
         OS_TEST_TIMEOUT=2400
         GABBI_LIVE_FAIL_IF_NO_TEST=1
passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* GLANCE_* NOVA_* ADMIN_*
# FIXME(sileht): run gabbi-run to fail fast in case of error, because testr
# doesn't support --failfast; the downside is that we lose the testr report.
commands =
    bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml'
#    bash -x {toxinidir}/tools/pretty_tox.sh "{posargs}"

# NOTE(chdent): The gabbi tests are also run under the other functional
# tox targets. This target simply provides a way to directly run just the
# gabbi tests without needing to do discovery across the entire body of
# tests.
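# A usage sketch (assuming tox and the overtest helper used above are
# installed): the gabbi tests can be run on their own with
#   tox -e gabbi
# and a testr selection regex can be passed through as posargs to filter
# the tests that are discovered, e.g. (the filter string is illustrative):
#   tox -e gabbi -- gabbi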
[testenv:gabbi]
setenv = OS_TEST_PATH=ceilometer/tests/functional/gabbi
passenv = CEILOMETER_*
commands = overtest mongodb {toxinidir}/tools/pretty_tox.sh "{posargs}"

[testenv:cover]
setenv = OS_TEST_PATH=ceilometer/tests
commands = python setup.py testr --slowest --coverage --testr-args="{posargs}"

[testenv:pep8]
commands =
    flake8
    # Check that .po and .pot files are valid:
    bash -c "find ceilometer -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"

[testenv:releasenotes]
commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[testenv:genconfig]
commands = oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf

[testenv:docs]
commands = python setup.py build_sphinx
setenv = PYTHONHASHSEED=0

[testenv:pylint]
commands = bash tools/lintstack.sh

[testenv:venv]
commands = {posargs}
setenv = PYTHONHASHSEED=0

[testenv:debug]
commands = bash -x oslo_debug_helper {posargs}

[testenv:debug-mongodb]
setenv = OS_TEST_PATH=ceilometer/tests/functional
commands = overtest mongodb oslo_debug_helper {posargs}

[testenv:debug-mysql]
setenv = OS_TEST_PATH=ceilometer/tests/functional
commands = overtest mysql oslo_debug_helper {posargs}

[testenv:debug-pgsql]
setenv = OS_TEST_PATH=ceilometer/tests/functional
commands = overtest postgresql oslo_debug_helper {posargs}

[testenv:debug-elastic]
setenv = OS_TEST_PATH=ceilometer/tests/functional
commands = overtest elasticsearch oslo_debug_helper {posargs}

[flake8]
ignore =
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
show-source = True

[hacking]
import_exceptions = ceilometer.i18n
local-check-factory = ceilometer.hacking.checks.factory
ceilometer-6.0.0/ceilometer/0000775000567000056710000000000012701406364017141 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/exchange_control.py0000664000567000056710000000414512701406224023034 0ustar jenkinsjenkins00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
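
# The options below name the AMQP control exchanges on which the
# corresponding OpenStack services emit their notifications; ceilometer's
# notification listeners use these names to subscribe. Presumably they only
# need overriding when a service is deployed with a non-default
# control_exchange setting.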
from oslo_config import cfg

EXCHANGE_OPTS = [
    cfg.StrOpt('heat_control_exchange',
               default='heat',
               help="Exchange name for Heat notifications."),
    cfg.StrOpt('glance_control_exchange',
               default='glance',
               help="Exchange name for Glance notifications."),
    cfg.StrOpt('magnetodb_control_exchange',
               default='magnetodb',
               help="Exchange name for Magnetodb notifications."),
    cfg.StrOpt('keystone_control_exchange',
               default='keystone',
               help="Exchange name for Keystone notifications."),
    cfg.StrOpt('cinder_control_exchange',
               default='cinder',
               help="Exchange name for Cinder notifications."),
    cfg.StrOpt('sahara_control_exchange',
               default='sahara',
               help="Exchange name for Data Processing notifications."),
    cfg.StrOpt('swift_control_exchange',
               default='swift',
               help="Exchange name for Swift notifications."),
    cfg.StrOpt('magnum_control_exchange',
               default='magnum',
               help="Exchange name for Magnum notifications."),
    cfg.StrOpt('trove_control_exchange',
               default='trove',
               help="Exchange name for DBaaS notifications."),
    cfg.StrOpt('zaqar_control_exchange',
               default='zaqar',
               help="Exchange name for Messaging service notifications."),
    cfg.StrOpt('dns_control_exchange',
               default='central',
               help="Exchange name for DNS service notifications."),
]
ceilometer-6.0.0/ceilometer/tests/0000775000567000056710000000000012701406364020303 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/pipeline_base.py0000664000567000056710000025104212701406224023453 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*-
#
# Copyright 2013 Intel Corp.
#
# Authors: Yunhong Jiang
#          Julien Danjou
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import copy
import datetime
import traceback

import mock
from oslo_context import context
from oslo_utils import timeutils
from oslotest import base
from oslotest import mockpatch
import six
from stevedore import extension

from ceilometer import pipeline
from ceilometer import publisher
from ceilometer.publisher import test as test_publisher
from ceilometer import sample
from ceilometer import transformer
from ceilometer.transformer import accumulator
from ceilometer.transformer import arithmetic
from ceilometer.transformer import conversions


@six.add_metaclass(abc.ABCMeta)
class BasePipelineTestCase(base.BaseTestCase):
    @staticmethod
    def fake_tem_init():
        """Fake a transformerManager for pipeline.
The faked entry point setting is below: update: TransformerClass except: TransformerClassException drop: TransformerClassDrop """ pass def fake_tem_get_ext(self, name): class_name_ext = { 'update': self.TransformerClass, 'except': self.TransformerClassException, 'drop': self.TransformerClassDrop, 'cache': accumulator.TransformerAccumulator, 'aggregator': conversions.AggregatorTransformer, 'unit_conversion': conversions.ScalingTransformer, 'rate_of_change': conversions.RateOfChangeTransformer, 'arithmetic': arithmetic.ArithmeticTransformer, 'delta': conversions.DeltaTransformer, } if name in class_name_ext: return extension.Extension(name, None, class_name_ext[name], None, ) raise KeyError(name) def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](url) class PublisherClassException(publisher.PublisherBase): def publish_samples(self, ctxt, samples): raise Exception() def publish_events(self, ctxt, events): raise Exception() class TransformerClass(transformer.TransformerBase): samples = [] grouping_keys = ['counter_name'] def __init__(self, append_name='_update'): self.__class__.samples = [] self.append_name = append_name def flush(self, ctxt): return [] def handle_sample(self, ctxt, counter): self.__class__.samples.append(counter) newname = getattr(counter, 'name') + self.append_name return sample.Sample( name=newname, type=counter.type, volume=counter.volume, unit=counter.unit, user_id=counter.user_id, project_id=counter.project_id, resource_id=counter.resource_id, timestamp=counter.timestamp, resource_metadata=counter.resource_metadata, ) class TransformerClassDrop(transformer.TransformerBase): samples = [] grouping_keys = ['resource_id'] def __init__(self): self.__class__.samples = [] def handle_sample(self, ctxt, counter): self.__class__.samples.append(counter) class TransformerClassException(object): grouping_keys = ['resource_id'] @staticmethod def handle_sample(ctxt, counter): raise Exception() def setUp(self): super(BasePipelineTestCase, self).setUp() self.test_counter = sample.Sample( name='a', type=sample.TYPE_GAUGE, volume=1, unit='B', user_id="test_user", project_id="test_proj", resource_id="test_resource", timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ) self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self.transformer_manager = mock.MagicMock() self.transformer_manager.__getitem__.side_effect = \ self.fake_tem_get_ext self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(mockpatch.Patch( 'ceilometer.pipeline.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, msg): if self._reraise_exception: raise Exception(traceback.format_exc()) @abc.abstractmethod def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" @abc.abstractmethod def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" @abc.abstractmethod def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" @abc.abstractmethod def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" @abc.abstractmethod def _set_pipeline_cfg(self, field, value): """Set a field to a value in the pipeline config.""" @abc.abstractmethod def _extend_pipeline_cfg(self, field, value): """Extend an existing field in the pipeline config 
with a value.""" @abc.abstractmethod def _unset_pipeline_cfg(self, field): """Clear an existing field in the pipeline config.""" def _exception_create_pipelinemanager(self): self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager) def test_no_counters(self): self._unset_pipeline_cfg('counters') self._exception_create_pipelinemanager() def test_no_transformers(self): self._unset_pipeline_cfg('transformers') pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_no_interval(self): self._unset_pipeline_cfg('interval') pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] self.assertEqual(600, pipe.get_interval()) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_invalid_resources(self): invalid_resource = {'invalid': 1} self._set_pipeline_cfg('resources', invalid_resource) self._exception_create_pipelinemanager() def test_check_counters_include_exclude_same(self): counter_cfg = ['a', '!a'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_include_exclude(self): counter_cfg = ['a', '!b'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_wildcard_included(self): counter_cfg = ['a', '*'] self._set_pipeline_cfg('counters', counter_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_invalid_string_interval(self): self._set_pipeline_cfg('interval', 'string') self._exception_create_pipelinemanager() def test_check_transformer_invalid_transformer(self): transformer_cfg = [ {'name': "test_invalid", 'parameters': {}} ] self._set_pipeline_cfg('transformers', transformer_cfg) self._exception_create_pipelinemanager() def test_get_interval(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[0] self.assertEqual(5, pipe.get_interval()) def test_publisher_transformer_invoked(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) def test_multiple_included_counters(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with 
pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) @mock.patch('ceilometer.pipeline.LOG') def test_none_volume_counter(self, LOG): self._set_pipeline_cfg('counters', ['empty_volume']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='empty_volume', type=self.test_counter.type, volume=None, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' 'sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) @mock.patch('ceilometer.pipeline.LOG') def test_fake_volume_counter(self, LOG): self._set_pipeline_cfg('counters', ['fake_volume']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='fake_volume', type=self.test_counter.type, volume='fake_value', unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) def test_counter_dont_match(self): counter_cfg = ['nomatch'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(0, publisher.calls) def test_wildcard_counter(self): counter_cfg = ['*'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) def test_wildcard_excluded_counters(self): counter_cfg = ['*', '!a'] self._set_pipeline_cfg('counters', counter_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) def 
    def test_wildcard_excluded_counters_not_excluded(self):
        counter_cfg = ['*', '!b']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, len(self.TransformerClass.samples))
        self.assertEqual('a_update', getattr(publisher.samples[0], "name"))

    def test_all_excluded_counters_not_excluded(self):
        counter_cfg = ['!b', '!c']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, len(self.TransformerClass.samples))
        self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], "name"))

    def test_all_excluded_counters_is_excluded(self):
        counter_cfg = ['!a', '!c']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertFalse(pipeline_manager.pipelines[0].support_meter('a'))
        self.assertTrue(pipeline_manager.pipelines[0].support_meter('b'))
        self.assertFalse(pipeline_manager.pipelines[0].support_meter('c'))

    def test_wildcard_and_excluded_wildcard_counters(self):
        counter_cfg = ['*', '!disk.*']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertFalse(
            pipeline_manager.pipelines[0].support_meter('disk.read.bytes'))
        self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))

    def test_included_counter_and_wildcard_counters(self):
        counter_cfg = ['cpu', 'disk.*']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertTrue(
            pipeline_manager.pipelines[0].support_meter('disk.read.bytes'))
        self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))
        self.assertFalse(
            pipeline_manager.pipelines[0].support_meter('instance'))

    def test_excluded_counter_and_excluded_wildcard_counters(self):
        counter_cfg = ['!cpu', '!disk.*']
        self._set_pipeline_cfg('counters', counter_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertFalse(
            pipeline_manager.pipelines[0].support_meter('disk.read.bytes'))
        self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu'))
        self.assertTrue(
            pipeline_manager.pipelines[0].support_meter('instance'))

    def test_multiple_pipeline(self):
        self._augment_pipeline_cfg()
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        self.test_counter = sample.Sample(
            name='b', type=self.test_counter.type,
            volume=self.test_counter.volume, unit=self.test_counter.unit,
            user_id=self.test_counter.user_id,
            project_id=self.test_counter.project_id,
            resource_id=self.test_counter.resource_id,
            timestamp=self.test_counter.timestamp,
            resource_metadata=self.test_counter.resource_metadata,
        )
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, publisher.calls)
        self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
        new_publisher = pipeline_manager.pipelines[1].publishers[0]
        self.assertEqual(1, len(new_publisher.samples))
        self.assertEqual(1, new_publisher.calls)
        self.assertEqual('b_new', getattr(new_publisher.samples[0], "name"))
        self.assertEqual(2, len(self.TransformerClass.samples))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], "name"))
        self.assertEqual('b',
                         getattr(self.TransformerClass.samples[1], "name"))

    def test_multiple_pipeline_exception(self):
        self._reraise_exception = False
        self._break_pipeline_cfg()
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        self.test_counter = sample.Sample(
            name='b', type=self.test_counter.type,
            volume=self.test_counter.volume, unit=self.test_counter.unit,
            user_id=self.test_counter.user_id,
            project_id=self.test_counter.project_id,
            resource_id=self.test_counter.resource_id,
            timestamp=self.test_counter.timestamp,
            resource_metadata=self.test_counter.resource_metadata,
        )
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, publisher.calls)
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
        self.assertEqual(2, len(self.TransformerClass.samples))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], "name"))
        self.assertEqual('b',
                         getattr(self.TransformerClass.samples[1], "name"))

    def test_none_transformer_pipeline(self):
        self._set_pipeline_cfg('transformers', None)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, publisher.calls)
        self.assertEqual('a', getattr(publisher.samples[0], 'name'))

    def test_empty_transformer_pipeline(self):
        self._set_pipeline_cfg('transformers', [])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, publisher.calls)
        self.assertEqual('a', getattr(publisher.samples[0], 'name'))
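    # NOTE: illustrative comment, not part of the original module.
    # Transformers are applied in list order, so a chain like the one
    # tested below (assuming the test 'update' transformer's default
    # append_name of '_update'):
    #
    #     'transformers': [
    #         {'name': 'update', 'parameters': {}},
    #         {'name': 'update', 'parameters': {}},
    #     ]
    #
    # renames a sample 'a' -> 'a_update' -> 'a_update_update' before it
    # reaches any publisher.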
    def test_multiple_transformer_same_class(self):
        transformer_cfg = [
            {'name': 'update', 'parameters': {}},
            {'name': 'update', 'parameters': {}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, publisher.calls)
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual('a_update_update',
                         getattr(publisher.samples[0], 'name'))
        self.assertEqual(2, len(self.TransformerClass.samples))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], 'name'))
        self.assertEqual('a_update',
                         getattr(self.TransformerClass.samples[1], 'name'))

    def test_multiple_transformer_same_class_different_parameter(self):
        transformer_cfg = [
            {'name': 'update', 'parameters': {"append_name": "_update"}},
            {'name': 'update', 'parameters': {"append_name": "_new"}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        self.assertEqual(2, len(self.TransformerClass.samples))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], 'name'))
        self.assertEqual('a_update',
                         getattr(self.TransformerClass.samples[1], 'name'))
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual('a_update_new',
                         getattr(publisher.samples[0], 'name'))

    def test_multiple_transformer_drop_transformer(self):
        transformer_cfg = [
            {'name': 'update', 'parameters': {"append_name": "_update"}},
            {'name': 'drop', 'parameters': {}},
            {'name': 'update', 'parameters': {"append_name": "_new"}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        self.assertEqual(1, len(self.TransformerClass.samples))
        self.assertEqual('a',
                         getattr(self.TransformerClass.samples[0], 'name'))
        self.assertEqual(1, len(self.TransformerClassDrop.samples))
        self.assertEqual('a_update',
                         getattr(self.TransformerClassDrop.samples[0],
                                 'name'))

    def test_multiple_publisher(self):
        self._set_pipeline_cfg('publishers', ['test://', 'new://'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        new_publisher = pipeline_manager.pipelines[0].publishers[1]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1, len(new_publisher.samples))
        self.assertEqual('a_update',
                         getattr(new_publisher.samples[0], 'name'))
        self.assertEqual('a_update', getattr(publisher.samples[0], 'name'))

    def test_multiple_publisher_isolation(self):
        self._reraise_exception = False
        self._set_pipeline_cfg('publishers', ['except://', 'new://'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        new_publisher = pipeline_manager.pipelines[0].publishers[1]
        self.assertEqual(1, len(new_publisher.samples))
        self.assertEqual('a_update',
                         getattr(new_publisher.samples[0], 'name'))
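    # NOTE: illustrative comment, not part of the original module. The
    # isolation test above relies on the fake 'except://' publisher raising
    # on publish; a failure in one publisher must not keep the remaining
    # publishers of the same pipeline from receiving the transformed sample,
    # which is why 'new://' still ends up with one sample.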
    def test_multiple_counter_pipeline(self):
        self._set_pipeline_cfg('counters', ['a', 'b'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter,
               sample.Sample(
                   name='b', type=self.test_counter.type,
                   volume=self.test_counter.volume,
                   unit=self.test_counter.unit,
                   user_id=self.test_counter.user_id,
                   project_id=self.test_counter.project_id,
                   resource_id=self.test_counter.resource_id,
                   timestamp=self.test_counter.timestamp,
                   resource_metadata=self.test_counter.resource_metadata,
               )])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(2, len(publisher.samples))
        self.assertEqual('a_update', getattr(publisher.samples[0], 'name'))
        self.assertEqual('b_update', getattr(publisher.samples[1], 'name'))

    def test_flush_pipeline_cache(self):
        CACHE_SIZE = 10
        extra_transformer_cfg = [
            {'name': 'cache', 'parameters': {'size': CACHE_SIZE}},
            {'name': 'update', 'parameters': {'append_name': '_new'}},
        ]
        self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, self.test_counter)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(0, len(publisher.samples))
        pipe.publish_data(None, self.test_counter)
        pipe.flush(None)
        self.assertEqual(0, len(publisher.samples))
        for i in range(CACHE_SIZE - 2):
            pipe.publish_data(None, self.test_counter)
        pipe.flush(None)
        self.assertEqual(CACHE_SIZE, len(publisher.samples))
        self.assertEqual('a_update_new',
                         getattr(publisher.samples[0], 'name'))

    def test_flush_pipeline_cache_multiple_counter(self):
        CACHE_SIZE = 3
        extra_transformer_cfg = [
            {'name': 'cache', 'parameters': {'size': CACHE_SIZE}},
            {'name': 'update', 'parameters': {'append_name': '_new'}},
        ]
        self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
        self._set_pipeline_cfg('counters', ['a', 'b'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter,
               sample.Sample(
                   name='b', type=self.test_counter.type,
                   volume=self.test_counter.volume,
                   unit=self.test_counter.unit,
                   user_id=self.test_counter.user_id,
                   project_id=self.test_counter.project_id,
                   resource_id=self.test_counter.resource_id,
                   timestamp=self.test_counter.timestamp,
                   resource_metadata=self.test_counter.resource_metadata,
               )])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        self.assertEqual(CACHE_SIZE, len(publisher.samples))
        self.assertEqual('a_update_new',
                         getattr(publisher.samples[0], 'name'))
        self.assertEqual('b_update_new',
                         getattr(publisher.samples[1], 'name'))

    def test_flush_pipeline_cache_before_publisher(self):
        extra_transformer_cfg = [{'name': 'cache', 'parameters': {}}]
        self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        publisher = pipe.publishers[0]
        pipe.publish_data(None, self.test_counter)
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual('a_update', getattr(publisher.samples[0], 'name'))
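    # NOTE: illustrative comment, not part of the original module. The
    # 'cache' test transformer used above holds samples until its configured
    # 'size' is reached; flush() is a no-op while the cache is still
    # filling, which is why the publisher sees nothing until exactly
    # CACHE_SIZE samples have been pushed through the pipeline.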
    def test_global_unit_conversion(self):
        scale = 'volume / ((10**6) * 60)'
        transformer_cfg = [
            {
                'name': 'unit_conversion',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_mins', 'unit': 'min',
                               'scale': scale},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        counters = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=1200000000,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        cpu_mins = publisher.samples[-1]
        self.assertEqual('cpu_mins', getattr(cpu_mins, 'name'))
        self.assertEqual('min', getattr(cpu_mins, 'unit'))
        self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type'))
        self.assertEqual(20, getattr(cpu_mins, 'volume'))

    def test_unit_identified_source_unit_conversion(self):
        transformer_cfg = [
            {
                'name': 'unit_conversion',
                'parameters': {
                    'source': {'unit': '°C'},
                    'target': {'unit': '°F',
                               'scale': '(volume * 1.8) + 32'},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['core_temperature',
                                            'ambient_temperature'])
        counters = [
            sample.Sample(
                name='core_temperature', type=sample.TYPE_GAUGE, volume=36.0,
                unit='°C', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
            sample.Sample(
                name='ambient_temperature', type=sample.TYPE_GAUGE,
                volume=88.8, unit='°F', user_id='test_user',
                project_id='test_proj', resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(2, len(publisher.samples))
        core_temp = publisher.samples[0]
        self.assertEqual('core_temperature', getattr(core_temp, 'name'))
        self.assertEqual('°F', getattr(core_temp, 'unit'))
        self.assertEqual(96.8, getattr(core_temp, 'volume'))
        amb_temp = publisher.samples[1]
        self.assertEqual('ambient_temperature', getattr(amb_temp, 'name'))
        self.assertEqual('°F', getattr(amb_temp, 'unit'))
        self.assertEqual(88.8, getattr(amb_temp, 'volume'))
        self.assertEqual(96.8, getattr(core_temp, 'volume'))

    def _do_test_rate_of_change_conversion(self, prev, curr, type, expected,
                                           offset=1, weight=None):
        s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)"
             "* (resource_metadata.non.existent or 1.0)"
             "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))")
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util', 'unit': '%',
                               'type': sample.TYPE_GAUGE, 'scale': s},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        now = timeutils.utcnow()
        later = now + datetime.timedelta(minutes=offset)
        um = {'autoscaling_weight': weight} if weight else {}
        counters = [
            sample.Sample(
                name='cpu', type=type, volume=prev, unit='ns',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource', timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4, 'user_metadata': um},
            ),
            sample.Sample(
                name='cpu', type=type, volume=prev, unit='ns',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource2', timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 2, 'user_metadata': um},
            ),
            sample.Sample(
                name='cpu', type=type, volume=curr, unit='ns',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 4, 'user_metadata': um},
            ),
            sample.Sample(
                name='cpu', type=type, volume=curr, unit='ns',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource2', timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 2, 'user_metadata': um},
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(2, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(2, len(publisher.samples))
        cpu_util = publisher.samples[0]
        self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
        self.assertEqual('test_resource', getattr(cpu_util, 'resource_id'))
        self.assertEqual('%', getattr(cpu_util, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
        self.assertEqual(expected, getattr(cpu_util, 'volume'))
        cpu_util = publisher.samples[1]
        self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
        self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id'))
        self.assertEqual('%', getattr(cpu_util, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
        self.assertEqual(expected * 2, getattr(cpu_util, 'volume'))

    def test_rate_of_change_conversion(self):
        self._do_test_rate_of_change_conversion(120000000000, 180000000000,
                                                sample.TYPE_CUMULATIVE, 25.0)

    def test_rate_of_change_conversion_weight(self):
        self._do_test_rate_of_change_conversion(120000000000, 180000000000,
                                                sample.TYPE_CUMULATIVE, 27.5,
                                                weight=1.1)

    def test_rate_of_change_conversion_negative_cumulative_delta(self):
        self._do_test_rate_of_change_conversion(180000000000, 120000000000,
                                                sample.TYPE_CUMULATIVE, 50.0)

    def test_rate_of_change_conversion_negative_gauge_delta(self):
        self._do_test_rate_of_change_conversion(180000000000, 120000000000,
                                                sample.TYPE_GAUGE, -25.0)

    def test_rate_of_change_conversion_zero_delay(self):
        self._do_test_rate_of_change_conversion(120000000000, 120000000000,
                                                sample.TYPE_CUMULATIVE, 0.0,
                                                offset=0)

    def test_rate_of_change_no_predecessor(self):
        s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util', 'unit': '%',
                               'type': sample.TYPE_GAUGE, 'scale': s}
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        now = timeutils.utcnow()
        counters = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource', timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(0, len(publisher.samples))
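    # NOTE: a worked example, not part of the original module. For
    # test_rate_of_change_conversion the cumulative cpu time grows from
    # 120e9 ns to 180e9 ns over 60 s on a 4-vCPU resource, so
    #
    #     rate  = (180e9 - 120e9) / 60 = 1e9 ns of cpu time per second
    #     scale = 100.0 / (10**9 * 4)
    #     util  = 1e9 * scale = 25.0 (%)
    #
    # and the 2-vCPU resource doubles that to 50.0, matching the
    # 'expected * 2' assertion above.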
    @mock.patch('ceilometer.transformer.conversions.LOG')
    def test_rate_of_change_out_of_order(self, the_log):
        s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util', 'unit': '%',
                               'type': sample.TYPE_GAUGE, 'scale': s}
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        now = timeutils.utcnow()
        earlier = now - datetime.timedelta(seconds=10)
        later = now + datetime.timedelta(seconds=10)
        counters = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=125000000000,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource', timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource', timestamp=earlier.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=130000000000,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource', timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
        ]
        pipe.publish_data(None, counters)
        publisher = pipe.publishers[0]
        self.assertEqual(1, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        cpu_util_sample = publisher.samples[0]
        self.assertEqual(12.5, cpu_util_sample.volume)
        the_log.warning.assert_called_with(
            'dropping out of time order sample: %s',
            (counters[1],)
        )

    def test_resources(self):
        resources = ['test1://', 'test2://']
        self._set_pipeline_cfg('resources', resources)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertEqual(resources, pipeline_manager.pipelines[0].resources)

    def test_no_resources(self):
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertEqual(0, len(pipeline_manager.pipelines[0].resources))

    def _do_test_rate_of_change_mapping(self, pipe, meters, units):
        now = timeutils.utcnow()
        base = 1000
        offset = 7
        rate = 42
        later = now + datetime.timedelta(minutes=offset)
        counters = []
        for v, ts in [(base, now.isoformat()),
                      (base + (offset * 60 * rate), later.isoformat())]:
            for n, u, r in [(meters[0], units[0], 'resource1'),
                            (meters[1], units[1], 'resource2')]:
                s = sample.Sample(
                    name=n, type=sample.TYPE_CUMULATIVE, volume=v, unit=u,
                    user_id='test_user', project_id='test_proj',
                    resource_id=r, timestamp=ts, resource_metadata={},
                )
                counters.append(s)
        pipe.publish_data(None, counters)
        publisher = pipe.publishers[0]
        self.assertEqual(2, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(2, len(publisher.samples))
        bps = publisher.samples[0]
        self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name'))
        self.assertEqual('resource1', getattr(bps, 'resource_id'))
        self.assertEqual('%s/s' % units[0], getattr(bps, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type'))
        self.assertEqual(rate, getattr(bps, 'volume'))
        rps = publisher.samples[1]
        self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name'))
        self.assertEqual('resource2', getattr(rps, 'resource_id'))
        self.assertEqual('%s/s' % units[1], getattr(rps, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type'))
        self.assertEqual(rate, getattr(rps, 'volume'))

    def test_rate_of_change_mapping(self):
        map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)',
                    'unit': '(B|request)'}
        map_to = {'name': 'disk.\\1.\\2.rate', 'unit': '\\1/s'}
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {'map_from': map_from},
                    'target': {'map_to': map_to,
                               'type': sample.TYPE_GAUGE},
                },
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['disk.read.bytes',
                                            'disk.write.requests'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        meters = ('disk.read.bytes', 'disk.write.requests')
        units = ('B', 'request')
        self._do_test_rate_of_change_mapping(pipe, meters, units)

    def _do_test_aggregator(self, parameters, expected_length):
        transformer_cfg = [
            {'name': 'aggregator', 'parameters': parameters},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
        counters = [
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=26, unit='B',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=16, unit='B',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=53, unit='B',
                user_id='test_user_bis', project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=42, unit='B',
                user_id='test_user_bis', project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=15, unit='B',
                user_id='test_user', project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=2, unit='B',
                user_id='test_user_bis', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '3.0'}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(expected_length, len(publisher.samples))
        return sorted(publisher.samples, key=lambda s: s.volume)

    def test_aggregator_meter_type(self):
        volumes = [1.0, 2.0, 3.0]
        transformer_cfg = [
            {
                'name': 'aggregator',
                'parameters': {'size': len(volumes) * len(sample.TYPES)}
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters',
                               ['testgauge', 'testcumulative', 'testdelta'])
        counters = []
        for sample_type in sample.TYPES:
            for volume in volumes:
                counters.append(sample.Sample(
                    name='test' + sample_type, type=sample_type,
                    volume=volume, unit='B', user_id='test_user',
                    project_id='test_proj', resource_id='test_resource',
                    timestamp=timeutils.utcnow().isoformat(),
                    resource_metadata={'version': '1.0'}
                ))
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        actual = sorted(s.volume for s in publisher.samples)
        self.assertEqual([2.0, 3.0, 6.0], actual)

    def test_aggregator_metadata(self):
        for conf, expected_version in [('last', '2.0'), ('first', '1.0')]:
            samples = self._do_test_aggregator({
                'resource_metadata': conf,
                'target': {'name': 'aggregated-bytes'}
            }, expected_length=4)
            s = samples[0]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(2, s.volume)
            self.assertEqual('test_user_bis', s.user_id)
            self.assertEqual('test_proj', s.project_id)
            self.assertEqual({'version': '3.0'}, s.resource_metadata)
            s = samples[1]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(15, s.volume)
            self.assertEqual('test_user', s.user_id)
            self.assertEqual('test_proj_bis', s.project_id)
            self.assertEqual({'version': '2.0'}, s.resource_metadata)
            s = samples[2]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(42, s.volume)
            self.assertEqual('test_user', s.user_id)
            self.assertEqual('test_proj', s.project_id)
            self.assertEqual({'version': expected_version},
                             s.resource_metadata)
            s = samples[3]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(95, s.volume)
            self.assertEqual('test_user_bis', s.user_id)
            self.assertEqual('test_proj_bis', s.project_id)
            self.assertEqual({'version': expected_version},
                             s.resource_metadata)

    def test_aggregator_user_last_and_metadata_last(self):
        samples = self._do_test_aggregator({
            'resource_metadata': 'last',
            'user_id': 'last',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=2)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(44, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '3.0'}, s.resource_metadata)
        s = samples[1]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(110, s.volume)
        self.assertEqual('test_user', s.user_id)
        self.assertEqual('test_proj_bis', s.project_id)
        self.assertEqual({'version': '2.0'}, s.resource_metadata)

    def test_aggregator_user_first_and_metadata_last(self):
        samples = self._do_test_aggregator({
            'resource_metadata': 'last',
            'user_id': 'first',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=2)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(44, s.volume)
        self.assertEqual('test_user', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '3.0'}, s.resource_metadata)
        s = samples[1]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(110, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
        self.assertEqual('test_proj_bis', s.project_id)
        self.assertEqual({'version': '2.0'}, s.resource_metadata)

    def test_aggregator_all_first(self):
        samples = self._do_test_aggregator({
            'resource_metadata': 'first',
            'user_id': 'first',
            'project_id': 'first',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '1.0'}, s.resource_metadata)

    def test_aggregator_all_last(self):
        samples = self._do_test_aggregator({
            'resource_metadata': 'last',
            'user_id': 'last',
            'project_id': 'last',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '3.0'}, s.resource_metadata)
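    # NOTE: illustrative comment, not part of the original module. The
    # aggregator groups samples by whichever of resource_metadata / user_id /
    # project_id it is NOT told to merge; 'first' keeps that attribute from
    # the oldest sample in a bucket, 'last' from the newest, and 'drop'
    # discards it. Merging across all three attributes collapses the six
    # test samples (26 + 16 + 53 + 42 + 15 + 2) into a single 154-byte
    # sample, as the tests around this point assert.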
    def test_aggregator_all_mixed(self):
        samples = self._do_test_aggregator({
            'resource_metadata': 'drop',
            'user_id': 'first',
            'project_id': 'last',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({}, s.resource_metadata)

    def test_aggregator_metadata_default(self):
        samples = self._do_test_aggregator({
            'user_id': 'last',
            'project_id': 'last',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '3.0'}, s.resource_metadata)

    @mock.patch('ceilometer.transformer.conversions.LOG')
    def test_aggregator_metadata_invalid(self, mylog):
        samples = self._do_test_aggregator({
            'resource_metadata': 'invalid',
            'user_id': 'last',
            'project_id': 'last',
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
        self.assertTrue(mylog.warning.called)
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
        self.assertEqual('test_proj', s.project_id)
        self.assertEqual({'version': '3.0'}, s.resource_metadata)

    def test_aggregator_sized_flush(self):
        transformer_cfg = [
            {'name': 'aggregator', 'parameters': {'size': 2}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
        counters = [
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=26, unit='B',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=16, unit='B',
                user_id='test_user_bis', project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            )
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, [counters[0]])
        pipe.flush(None)
        publisher = pipe.publishers[0]
        self.assertEqual(0, len(publisher.samples))
        pipe.publish_data(None, [counters[1]])
        pipe.flush(None)
        publisher = pipe.publishers[0]
        self.assertEqual(2, len(publisher.samples))

    def test_aggregator_timed_flush(self):
        timeutils.set_time_override()
        transformer_cfg = [
            {
                'name': 'aggregator',
                'parameters': {'size': 900, 'retention_time': 60},
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
        counters = [
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA, volume=26, unit='B',
                user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        timeutils.advance_time_seconds(120)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(1, len(publisher.samples))

    def test_aggregator_without_authentication(self):
        transformer_cfg = [
            {'name': 'aggregator', 'parameters': {'size': 2}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['storage.objects.outgoing.bytes'])
        counters = [
            sample.Sample(
                name='storage.objects.outgoing.bytes',
                type=sample.TYPE_DELTA, volume=26, unit='B',
                user_id=None, project_id=None,
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.outgoing.bytes',
                type=sample.TYPE_DELTA, volume=16, unit='B',
                user_id=None, project_id=None,
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            )
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, [counters[0]])
        pipe.flush(None)
        publisher = pipe.publishers[0]
        self.assertEqual(0, len(publisher.samples))
        pipe.publish_data(None, [counters[1]])
        pipe.flush(None)
        publisher = pipe.publishers[0]
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(42, getattr(publisher.samples[0], 'volume'))
        self.assertEqual("test_resource",
                         getattr(publisher.samples[0], 'resource_id'))

    def test_aggregator_to_rate_of_change_transformer_two_resources(self):
        resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4',
                       '5dd418a6-c6a9-49c9-9cef-b357d72c71dd']
        aggregator = conversions.AggregatorTransformer(size="2",
                                                       timestamp="last")
        rate_of_change_transformer = conversions.RateOfChangeTransformer()
        counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00')
        for offset in range(2):
            counter = copy.copy(self.test_counter)
            counter.timestamp = timeutils.isotime(counter_time)
            counter.resource_id = resource_id[0]
            counter.volume = offset
            counter.type = sample.TYPE_CUMULATIVE
            counter.unit = 'ns'
            aggregator.handle_sample(context.get_admin_context(), counter)
            if offset == 1:
                test_time = counter_time
            counter_time = counter_time + datetime.timedelta(0, 1)
        aggregated_counters = aggregator.flush(context.get_admin_context())
        self.assertEqual(len(aggregated_counters), 1)
        self.assertEqual(aggregated_counters[0].timestamp,
                         timeutils.isotime(test_time))
        rate_of_change_transformer.handle_sample(context.get_admin_context(),
                                                 aggregated_counters[0])
        for offset in range(2):
            counter = copy.copy(self.test_counter)
            counter.timestamp = timeutils.isotime(counter_time)
            counter.resource_id = resource_id[offset]
            counter.volume = 2
            counter.type = sample.TYPE_CUMULATIVE
            counter.unit = 'ns'
            aggregator.handle_sample(context.get_admin_context(), counter)
            if offset == 0:
                test_time = counter_time
            counter_time = counter_time + datetime.timedelta(0, 1)
        aggregated_counters = aggregator.flush(context.get_admin_context())
        self.assertEqual(len(aggregated_counters), 2)
        for counter in aggregated_counters:
            if counter.resource_id == resource_id[0]:
                rateOfChange = rate_of_change_transformer.handle_sample(
                    context.get_admin_context(), counter)
                self.assertEqual(counter.timestamp,
                                 timeutils.isotime(test_time))
                self.assertEqual(rateOfChange.volume, 1)

    def _do_test_arithmetic_expr_parse(self, expr, expected):
        actual = arithmetic.ArithmeticTransformer.parse_expr(expr)
        self.assertEqual(expected, actual)

    def test_arithmetic_expr_parse(self):
        expr = '$(cpu) + $(cpu.util)'
        expected = ('cpu.volume + _cpu_util_ESC.volume',
                    {'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC'})
        self._do_test_arithmetic_expr_parse(expr, expected)

    def test_arithmetic_expr_parse_parameter(self):
        expr = '$(cpu) + $(cpu.util).resource_metadata'
        expected = ('cpu.volume + _cpu_util_ESC.resource_metadata',
                    {'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC'})
        self._do_test_arithmetic_expr_parse(expr, expected)
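    # NOTE: illustrative comment, not part of the original module. In
    # arithmetic transformer expressions each $(meter) reference expands to
    # <meter>.volume unless a specific attribute is named, and meter names
    # that are not valid Python identifiers (or that collide with reserved
    # words) are escaped with an _ESC suffix, e.g.
    # $(cpu.util) -> _cpu_util_ESC.volume, as the parse tests around this
    # point show.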
    def test_arithmetic_expr_parse_reserved_keyword(self):
        expr = '$(class) + $(cpu.util)'
        expected = ('_class_ESC.volume + _cpu_util_ESC.volume',
                    {'class': '_class_ESC', 'cpu.util': '_cpu_util_ESC'})
        self._do_test_arithmetic_expr_parse(expr, expected)

    def test_arithmetic_expr_parse_already_escaped(self):
        expr = '$(class) + $(_class_ESC)'
        expected = ('_class_ESC.volume + __class_ESC_ESC.volume',
                    {'class': '_class_ESC',
                     '_class_ESC': '__class_ESC_ESC'})
        self._do_test_arithmetic_expr_parse(expr, expected)

    def _do_test_arithmetic(self, expression, scenario, expected):
        transformer_cfg = [
            {
                'name': 'arithmetic',
                'parameters': {
                    'target': {'name': 'new_meter', 'unit': '%',
                               'type': sample.TYPE_GAUGE,
                               'expr': expression},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters',
                               list(set(s['name'] for s in scenario)))
        counters = []
        test_resources = ['test_resource1', 'test_resource2']
        for resource_id in test_resources:
            for s in scenario:
                counters.append(sample.Sample(
                    name=s['name'], type=sample.TYPE_CUMULATIVE,
                    volume=s['volume'], unit='ns', user_id='test_user',
                    project_id='test_proj', resource_id=resource_id,
                    timestamp=timeutils.utcnow().isoformat(),
                    resource_metadata=s.get('metadata')
                ))
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        for s in counters:
            pipe.publish_data(None, s)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        expected_len = len(test_resources) * len(expected)
        self.assertEqual(expected_len, len(publisher.samples))
        # bucket samples by resource first
        samples_by_resource = dict((r, []) for r in test_resources)
        for s in publisher.samples:
            samples_by_resource[s.resource_id].append(s)
        for resource_id in samples_by_resource:
            self.assertEqual(len(expected),
                             len(samples_by_resource[resource_id]))
            for i, s in enumerate(samples_by_resource[resource_id]):
                self.assertEqual('new_meter', getattr(s, 'name'))
                self.assertEqual(resource_id, getattr(s, 'resource_id'))
                self.assertEqual('%', getattr(s, 'unit'))
                self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type'))
                self.assertEqual(expected[i], getattr(s, 'volume'))

    def test_arithmetic_transformer(self):
        expression = '100.0 * $(memory.usage) / $(memory)'
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
        ]
        expected = [50.0]
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_expr_empty(self):
        expression = ''
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
        ]
        expected = []
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_expr_misconfigured(self):
        expression = '512.0 * 3'
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
        ]
        expected = []
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_nan(self):
        expression = 'float(\'nan\') * $(memory.usage) / $(memory)'
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
        ]
        expected = []
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_exception(self):
        expression = '$(memory) / 0'
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
        ]
        expected = []
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_multiple_samples(self):
        expression = '100.0 * $(memory.usage) / $(memory)'
        scenario = [
            dict(name='memory', volume=2048.0),
            dict(name='memory.usage', volume=512.0),
            dict(name='memory', volume=1024.0),
        ]
        expected = [25.0]
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_missing(self):
        expression = '100.0 * $(memory.usage) / $(memory)'
        scenario = [dict(name='memory.usage', volume=512.0)]
        expected = []
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_more_than_needed(self):
        expression = '100.0 * $(memory.usage) / $(memory)'
        scenario = [
            dict(name='memory', volume=1024.0),
            dict(name='memory.usage', volume=512.0),
            dict(name='cpu_util', volume=90.0),
        ]
        expected = [50.0]
        self._do_test_arithmetic(expression, scenario, expected)

    def test_arithmetic_transformer_cache_cleared(self):
        transformer_cfg = [
            {
                'name': 'arithmetic',
                'parameters': {
                    'target': {'name': 'new_meter',
                               'expr': '$(memory.usage) + 2'}
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['memory.usage'])
        counter = sample.Sample(
            name='memory.usage', type=sample.TYPE_GAUGE, volume=1024.0,
            unit='MB', user_id='test_user', project_id='test_proj',
            resource_id='test_resource',
            timestamp=timeutils.utcnow().isoformat(),
            resource_metadata=None
        )
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, [counter])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        self.assertEqual(1026.0, publisher.samples[0].volume)
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        counter.volume = 2048.0
        pipe.publish_data(None, [counter])
        pipe.flush(None)
        self.assertEqual(2, len(publisher.samples))
        self.assertEqual(2050.0, publisher.samples[1].volume)

    def test_aggregator_timed_flush_no_matching_samples(self):
        timeutils.set_time_override()
        transformer_cfg = [
            {
                'name': 'aggregator',
                'parameters': {'size': 900, 'retention_time': 60},
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['unrelated-sample'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        timeutils.advance_time_seconds(200)
        pipe = pipeline_manager.pipelines[0]
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))

    def _do_test_delta(self, data, expected, growth_only=False):
        transformer_cfg = [
            {
                'name': 'delta',
                'parameters': {
                    'target': {'name': 'new_meter'},
                    'growth_only': growth_only,
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, data)
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(expected, len(publisher.samples))
        return publisher.samples

    def test_delta_transformer(self):
        samples = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=26,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=16,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=53,
                unit='ns', user_id='test_user_bis',
                project_id='test_proj_bis', resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
        ]
        deltas = self._do_test_delta(samples, 2)
        self.assertEqual('new_meter', deltas[0].name)
        self.assertEqual('delta', deltas[0].type)
        self.assertEqual('ns', deltas[0].unit)
        self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata)
        self.assertEqual(-10, deltas[0].volume)
        self.assertEqual('new_meter', deltas[1].name)
        self.assertEqual('delta', deltas[1].type)
        self.assertEqual('ns', deltas[1].unit)
        self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata)
        self.assertEqual(37, deltas[1].volume)

    def test_delta_transformer_out_of_order(self):
        samples = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=26,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=16,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=((timeutils.utcnow() -
                            datetime.timedelta(minutes=5)).isoformat()),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=53,
                unit='ns', user_id='test_user_bis',
                project_id='test_proj_bis', resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
        ]
        deltas = self._do_test_delta(samples, 1)
        self.assertEqual('new_meter', deltas[0].name)
        self.assertEqual('delta', deltas[0].type)
        self.assertEqual('ns', deltas[0].unit)
        self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata)
        self.assertEqual(27, deltas[0].volume)

    def test_delta_transformer_growth_only(self):
        samples = [
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=26,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=16,
                unit='ns', user_id='test_user', project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='cpu', type=sample.TYPE_CUMULATIVE, volume=53,
                unit='ns', user_id='test_user_bis',
                project_id='test_proj_bis', resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
        ]
        deltas = self._do_test_delta(samples, 1, True)
        self.assertEqual('new_meter', deltas[0].name)
        self.assertEqual('delta', deltas[0].type)
        self.assertEqual('ns', deltas[0].unit)
        self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata)
        self.assertEqual(37, deltas[0].volume)

    def test_unique_pipeline_names(self):
        self._dup_pipeline_name_cfg()
        self._exception_create_pipelinemanager()

    def test_get_pipeline_grouping_key(self):
        transformer_cfg = [
            {'name': 'update', 'parameters': {}},
            {
                'name': 'unit_conversion',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_mins', 'unit': 'min',
                               'scale': 'volume'},
                }
            },
            {'name': 'update', 'parameters': {}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertEqual(set(['resource_id', 'counter_name']),
                         set(pipeline.get_pipeline_grouping_key(
                             pipeline_manager.pipelines[0])))

    def test_get_pipeline_duplicate_grouping_key(self):
        transformer_cfg = [
            {'name': 'update', 'parameters': {}},
            {'name': 'update', 'parameters': {}},
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        self.assertEqual(['counter_name'],
                         pipeline.get_pipeline_grouping_key(
                             pipeline_manager.pipelines[0]))
ceilometer-6.0.0/ceilometer/tests/tempest/0000775000567000056710000000000012701406364021764 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/plugin.py0000664000567000056710000000310312701406223023623 0ustar jenkinsjenkins00000000000000#
# Copyright 2015 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from tempest import config
from tempest.test_discover import plugins

import ceilometer
from ceilometer.tests.tempest import config as tempest_config


class CeilometerTempestPlugin(plugins.TempestPlugin):

    def load_tests(self):
        base_path = os.path.split(os.path.dirname(
            os.path.abspath(ceilometer.__file__)))[0]
        test_dir = "ceilometer/tests/tempest"
        full_test_dir = os.path.join(base_path, test_dir)
        return full_test_dir, base_path

    def register_opts(self, conf):
        config.register_opt_group(conf,
                                  tempest_config.service_available_group,
                                  tempest_config.ServiceAvailableGroup)
        config.register_opt_group(conf,
                                  tempest_config.telemetry_group,
                                  tempest_config.TelemetryGroup)

    def get_opt_lists(self):
        return [(tempest_config.telemetry_group.name,
                 tempest_config.TelemetryGroup)]
ceilometer-6.0.0/ceilometer/tests/tempest/__init__.py0000664000567000056710000000000012701406223024055 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/config.py0000664000567000056710000000315512701406223023601 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
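# NOTE: illustrative only (not part of the original file). Once the plugin
# registers the option groups defined below, tempest code reads them through
# the global CONF object, e.g.:
#
#     from tempest import config
#     CONF = config.CONF
#     if not CONF.service_available.ceilometer_plugin:
#         ...  # skip telemetry tests
#     catalog = CONF.telemetry_plugin.catalog_type  # 'metering' by default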
from oslo_config import cfg

service_available_group = cfg.OptGroup(name="service_available",
                                       title="Available OpenStack Services")

ServiceAvailableGroup = [
    cfg.BoolOpt('ceilometer_plugin',
                default=True,
                help="Whether or not Ceilometer is expected to be "
                     "available"),
]

telemetry_group = cfg.OptGroup(name='telemetry_plugin',
                               title='Telemetry Service Options')

TelemetryGroup = [
    cfg.StrOpt('catalog_type',
               default='metering',
               help="Catalog type of the Telemetry service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the telemetry service."),
    cfg.BoolOpt('event_enabled',
                default=True,
                help="Runs Ceilometer event-related tests"),
]
ceilometer-6.0.0/ceilometer/tests/tempest/service/0000775000567000056710000000000012701406364023424 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/service/__init__.py0000664000567000056710000000000012701406223025515 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/service/client.py0000664000567000056710000001562312701406223025255 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest import config
from tempest.lib.common import rest_client
from tempest.lib.services.compute.flavors_client import FlavorsClient
from tempest.lib.services.compute.floating_ips_client import FloatingIPsClient
from tempest.lib.services.compute.networks_client import NetworksClient
from tempest.lib.services.compute.servers_client import ServersClient
from tempest import manager
from tempest.services.image.v1.json.images_client import ImagesClient
from tempest.services.image.v2.json.images_client import ImagesClientV2
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient

CONF = config.CONF


class TelemetryClient(rest_client.RestClient):

    version = '2'
    uri_prefix = "v2"

    def deserialize(self, body):
        return json.loads(body.replace("\n", ""))

    def serialize(self, body):
        return json.dumps(body)

    def create_sample(self, meter_name, sample_list):
        uri = "%s/meters/%s" % (self.uri_prefix, meter_name)
        body = self.serialize(sample_list)
        resp, body = self.post(uri, body)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)

    def _helper_list(self, uri, query=None, period=None):
        uri_dict = {}
        if query:
            uri_dict = {'q.field': query[0],
                        'q.op': query[1],
                        'q.value': query[2]}
        if period:
            uri_dict['period'] = period
        if uri_dict:
            uri += "?%s" % urllib.urlencode(uri_dict)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBodyList(resp, body)

    def list_resources(self, query=None):
        uri = '%s/resources' % self.uri_prefix
        return self._helper_list(uri, query)

    def list_meters(self, query=None):
        uri = '%s/meters' % self.uri_prefix
        return self._helper_list(uri, query)

    def list_statistics(self, meter, period=None, query=None):
        uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter)
        return self._helper_list(uri, query, period)

    def list_samples(self, meter_id, query=None):
        uri = '%s/meters/%s' % (self.uri_prefix, meter_id)
        return self._helper_list(uri, query)

    def list_events(self, query=None):
        uri = '%s/events' % self.uri_prefix
        return self._helper_list(uri, query)

    def show_resource(self, resource_id):
        uri = '%s/resources/%s' % (self.uri_prefix, resource_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)


class Manager(manager.Manager):

    load_clients = [
        'servers_client',
        'compute_networks_client',
        'compute_floating_ips_client',
        'flavors_client',
        'image_client',
        'image_client_v2',
        'telemetry_client',
        'container_client',
        'object_client',
    ]

    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    compute_params = {
        'service': CONF.compute.catalog_type,
        'region': CONF.compute.region or CONF.identity.region,
        'endpoint_type': CONF.compute.endpoint_type,
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout,
    }
    compute_params.update(default_params)

    image_params = {
        'catalog_type': CONF.image.catalog_type,
        'region': CONF.image.region or CONF.identity.region,
        'endpoint_type': CONF.image.endpoint_type,
        'build_interval': CONF.image.build_interval,
        'build_timeout': CONF.image.build_timeout,
    }
    image_params.update(default_params)
= { 'service': CONF.telemetry_plugin.catalog_type, 'region': CONF.identity.region, 'endpoint_type': CONF.telemetry_plugin.endpoint_type, } telemetry_params.update(default_params) object_storage_params = { 'service': CONF.object_storage.catalog_type, 'region': CONF.object_storage.region or CONF.identity.region, 'endpoint_type': CONF.object_storage.endpoint_type } object_storage_params.update(default_params) def __init__(self, credentials=None, service=None): super(Manager, self).__init__(credentials) for client in self.load_clients: getattr(self, 'set_%s' % client)() def set_servers_client(self): self.servers_client = ServersClient(self.auth_provider, **self.compute_params) def set_compute_networks_client(self): self.compute_networks_client = NetworksClient(self.auth_provider, **self.compute_params) def set_compute_floating_ips_client(self): self.compute_floating_ips_client = FloatingIPsClient( self.auth_provider, **self.compute_params) def set_flavors_client(self): self.flavors_client = FlavorsClient(self.auth_provider, **self.compute_params) def set_image_client(self): self.image_client = ImagesClient(self.auth_provider, **self.image_params) def set_image_client_v2(self): self.image_client_v2 = ImagesClientV2(self.auth_provider, **self.image_params) def set_telemetry_client(self): self.telemetry_client = TelemetryClient(self.auth_provider, **self.telemetry_params) def set_container_client(self): self.container_client = ContainerClient(self.auth_provider, **self.object_storage_params) def set_object_client(self): self.object_client = ObjectClient(self.auth_provider, **self.object_storage_params) ceilometer-6.0.0/ceilometer/tests/tempest/api/0000775000567000056710000000000012701406364022535 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/api/__init__.py0000664000567000056710000000000012701406223024626 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/api/base.py0000664000567000056710000001300412701406223024011 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
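# NOTE: illustrative usage sketch for the TelemetryClient defined in
# the service client module above.  _helper_list() flattens a
# (field, op, value) triple into the v2 API's q.* query parameters;
# the meter name, resource UUID and parameter ordering below are
# hypothetical, not taken from a real run:
#
#     client = TelemetryClient(auth_provider, **Manager.telemetry_params)
#     # GET v2/meters/cpu_util/statistics
#     #     ?q.field=resource_id&q.op=eq&q.value=<uuid>&period=60
#     stats = client.list_statistics('cpu_util', period=60,
#                                    query=('resource_id', 'eq', '<uuid>'))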
import time

from oslo_utils import timeutils
from tempest.common import compute
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
import tempest.test

from ceilometer.tests.tempest.service import client

CONF = config.CONF


class ClientManager(client.Manager):

    load_clients = [
        'servers_client',
        'compute_networks_client',
        'compute_floating_ips_client',
        'flavors_client',
        'image_client',
        'image_client_v2',
        'telemetry_client',
    ]


class BaseTelemetryTest(tempest.test.BaseTestCase):

    """Base test case class for all Telemetry API tests."""

    credentials = ['primary']
    client_manager = ClientManager

    @classmethod
    def skip_checks(cls):
        super(BaseTelemetryTest, cls).skip_checks()
        if not CONF.service_available.ceilometer_plugin:
            raise cls.skipException("Ceilometer support is required")

    @classmethod
    def setup_credentials(cls):
        cls.set_network_resources()
        super(BaseTelemetryTest, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(BaseTelemetryTest, cls).setup_clients()
        cls.telemetry_client = cls.os_primary.telemetry_client
        cls.servers_client = cls.os_primary.servers_client
        cls.flavors_client = cls.os_primary.flavors_client
        cls.image_client = cls.os_primary.image_client
        cls.image_client_v2 = cls.os_primary.image_client_v2

    @classmethod
    def resource_setup(cls):
        super(BaseTelemetryTest, cls).resource_setup()
        cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
                                  'disk.ephemeral.size']
        cls.glance_notifications = ['image.size']
        cls.glance_v2_notifications = ['image.download', 'image.serve']
        cls.server_ids = []
        cls.image_ids = []

    @classmethod
    def create_server(cls):
        tenant_network = cls.get_tenant_network()
        body, server = compute.create_test_server(
            cls.os_primary,
            tenant_network=tenant_network,
            name=data_utils.rand_name('ceilometer-instance'),
            wait_until='ACTIVE')
        cls.server_ids.append(body['id'])
        return body

    @classmethod
    def create_image(cls, client, **kwargs):
        body = client.create_image(name=data_utils.rand_name('image'),
                                   container_format='bare',
                                   disk_format='raw', **kwargs)
        # TODO(jswarren) Move ['image'] up to initial body value assignment
        # once both v1 and v2 glance clients include the full response
        # object.
        if 'image' in body:
            body = body['image']
        cls.image_ids.append(body['id'])
        return body

    @staticmethod
    def cleanup_resources(method, list_of_ids):
        for resource_id in list_of_ids:
            try:
                method(resource_id)
            except lib_exc.NotFound:
                pass

    @classmethod
    def resource_cleanup(cls):
        cls.cleanup_resources(cls.servers_client.delete_server,
                              cls.server_ids)
        cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
        super(BaseTelemetryTest, cls).resource_cleanup()

    def await_samples(self, metric, query):
        """Wait for samples for the given metric to reach the database.

        There can be long delays before a sample becomes queryable when
        PostgreSQL (or MySQL) is used as the ceilometer backend.
        """
        timeout = CONF.compute.build_timeout
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
            body = self.telemetry_client.list_samples(metric, query)
            if body:
                return body
            time.sleep(CONF.compute.build_interval)

        raise exceptions.TimeoutException(
            'Sample for metric:%s with query:%s has not been added to the '
            'database within %d seconds' % (metric, query,
                                            CONF.compute.build_timeout))


class BaseTelemetryAdminTest(BaseTelemetryTest):
    """Base test case class for admin Telemetry API tests."""

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(BaseTelemetryAdminTest, cls).setup_clients()
        cls.telemetry_admin_client = cls.os_adm.telemetry_client

    def await_events(self, query):
        timeout = CONF.compute.build_timeout
        start = timeutils.utcnow()
        while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
            body = self.telemetry_admin_client.list_events(query)
            if body:
                return body
            time.sleep(CONF.compute.build_interval)

        raise exceptions.TimeoutException(
            'Event with query:%s has not been added to the '
            'database within %d seconds' % (query, CONF.compute.build_timeout))
ceilometer-6.0.0/ceilometer/tests/tempest/api/test_telemetry_notification_api.py0000664000567000056710000000570312701406223031556 0ustar jenkinsjenkins00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
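# NOTE: the tests in this module drive BaseTelemetryTest.await_samples()
# (defined in base.py above), which polls list_samples() until the meter
# appears or CONF.compute.build_timeout expires.  A typical call, as used
# below (the server id value is whatever create_server() returned):
#
#     query = ('resource', 'eq', body['id'])
#     for metric in self.nova_notifications:
#         self.await_samples(metric, query)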
# Change-Id: I14e16a1a7d9813b324ee40545c07f0e88fb637b7 import testtools from ceilometer.tests.tempest.api import base from tempest import config from tempest.lib import decorators from tempest import test CONF = config.CONF class TelemetryNotificationAPITest(base.BaseTelemetryTest): @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caae') @test.services('compute') def test_check_nova_notification(self): body = self.create_server() query = ('resource', 'eq', body['id']) for metric in self.nova_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('04b10bfe-a5dc-47af-b22f-0460426bf499') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v1, "Glance api v1 is disabled") def test_check_glance_v1_notifications(self): body = self.create_image(self.image_client, is_public=False) self.image_client.update_image(body['id'], data='data') query = 'resource', 'eq', body['id'] self.image_client.delete_image(body['id']) for metric in self.glance_notifications: self.await_samples(metric, query) @test.attr(type="smoke") @test.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8f') @test.services("image") @testtools.skipIf(not CONF.image_feature_enabled.api_v2, "Glance api v2 is disabled") def test_check_glance_v2_notifications(self): body = self.create_image(self.image_client_v2, visibility='private') self.image_client_v2.store_image_file(body['id'], "file") self.image_client_v2.show_image_file(body['id']) query = 'resource', 'eq', body['id'] for metric in self.glance_v2_notifications: self.await_samples(metric, query) class TelemetryNotificationAdminAPITest(base.BaseTelemetryAdminTest): @test.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebea') @test.services('compute') @decorators.skip_because(bug='1480490') def test_check_nova_notification_event_and_meter(self): body = self.create_server() if CONF.telemetry_plugin.event_enabled: query = ('instance_id', 'eq', body['id']) self.await_events(query) query = ('resource', 'eq', body['id']) for metric in self.nova_notifications: self.await_samples(metric, query) ceilometer-6.0.0/ceilometer/tests/tempest/scenario/0000775000567000056710000000000012701406364023567 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/scenario/__init__.py0000664000567000056710000000000012701406223025660 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py0000664000567000056710000001324012701406223034453 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
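# NOTE: sketch of the sample metadata the following scenario inspects.
# Depending on how the middleware reports them, container/object names
# appear either as plain keys or as namespaced 'target.metadata:' keys,
# which is why _confirm_notifications() below checks both forms (the
# values here are hypothetical):
#
#     sample['resource_metadata'] == {
#         'container': 'swift-scenario-container-...',
#         'object': 'swift-scenario-object-...',
#     }
#     # or:
#     # {'target.metadata:container': ..., 'target.metadata:object': ...}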
from oslo_log import log as logging
from tempest.common.utils import data_utils
from tempest import config
from tempest import test

from ceilometer.tests.tempest.service import client

CONF = config.CONF

LOG = logging.getLogger(__name__)

# Loop for up to 120 seconds waiting on notifications
# NOTE(chdent): The choice of 120 seconds is fairly
# arbitrary: Long enough to give the notifications the
# chance to travel across a highly latent bus but not
# so long as to allow excessive latency to never be visible.
# TODO(chdent): Ideally this value would come from configuration.
NOTIFICATIONS_WAIT = 120
NOTIFICATIONS_SLEEP = 1


class ClientManager(client.Manager):

    load_clients = [
        'telemetry_client',
        'container_client',
        'object_client',
    ]


class TestObjectStorageTelemetry(test.BaseTestCase):
    """Test that swift uses the ceilometer middleware.

    * create container.
    * upload a file to the created container.
    * retrieve the file from the created container.
    * wait for notifications from ceilometer.
    """

    credentials = ['primary']
    client_manager = ClientManager

    @classmethod
    def skip_checks(cls):
        super(TestObjectStorageTelemetry, cls).skip_checks()
        if not CONF.service_available.swift:
            skip_msg = ("%s skipped as swift is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)
        if not CONF.service_available.ceilometer_plugin:
            skip_msg = ("%s skipped as ceilometer is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)

    @classmethod
    def setup_credentials(cls):
        cls.set_network_resources()
        super(TestObjectStorageTelemetry, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(TestObjectStorageTelemetry, cls).setup_clients()
        cls.telemetry_client = cls.os_primary.telemetry_client
        cls.container_client = cls.os_primary.container_client
        cls.object_client = cls.os_primary.object_client

    def _confirm_notifications(self, container_name, obj_name):
        # NOTE: Loop, looking for the appropriate notifications about the
        # containers and objects sent to swift.
        def _check_samples():
            # NOTE: Return True only if we have notifications about some
            # containers and some objects and the notifications are about
            # the expected containers and objects.
            # Otherwise returning False will cause _check_samples to be
            # called again.
            results = self.telemetry_client.list_samples(
                'storage.objects.incoming.bytes')
            LOG.debug('got samples %s', results)

            # Extract container info from samples.
            containers, objects = [], []
            for sample in results:
                meta = sample['resource_metadata']
                if meta.get('container') and meta['container'] != 'None':
                    containers.append(meta['container'])
                elif (meta.get('target.metadata:container') and
                      meta['target.metadata:container'] != 'None'):
                    containers.append(meta['target.metadata:container'])

                if meta.get('object') and meta['object'] != 'None':
                    objects.append(meta['object'])
                elif (meta.get('target.metadata:object') and
                      meta['target.metadata:object'] != 'None'):
                    objects.append(meta['target.metadata:object'])

            return (container_name in containers and obj_name in objects)

        self.assertTrue(test.call_until_true(_check_samples,
                                             NOTIFICATIONS_WAIT,
                                             NOTIFICATIONS_SLEEP),
                        'Correct notifications were not received after '
                        '%s seconds.' % NOTIFICATIONS_WAIT)

    def create_container(self):
        name = data_utils.rand_name('swift-scenario-container')
        self.container_client.create_container(name)
        # list the container contents to ensure it was created
        self.container_client.list_container_contents(name)
        LOG.debug('Container %s created', name)
        self.addCleanup(self.container_client.delete_container, name)
        return name

    def upload_object_to_container(self, container_name):
        obj_name = data_utils.rand_name('swift-scenario-object')
        obj_data = data_utils.arbitrary_string()
        self.object_client.create_object(container_name, obj_name, obj_data)
        self.addCleanup(self.object_client.delete_object,
                        container_name, obj_name)
        return obj_name

    @test.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb85')
    @test.services('object_storage', 'telemetry')
    def test_swift_middleware_notifies(self):
        container_name = self.create_container()
        obj_name = self.upload_object_to_container(container_name)
        self._confirm_notifications(container_name, obj_name)
ceilometer-6.0.0/ceilometer/tests/__init__.py0000664000567000056710000000000012701406223022374 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/base.py0000664000567000056710000000706712701406223021573 0ustar jenkinsjenkins00000000000000
# Copyright 2012 New Dream Network (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test base classes.
"""
import functools
import os.path

import oslo_messaging.conffixture
from oslo_utils import timeutils
from oslotest import base
from oslotest import mockpatch
import six
from testtools import testcase
import webtest

import ceilometer
from ceilometer import messaging


class BaseTestCase(base.BaseTestCase):
    def setup_messaging(self, conf, exchange=None):
        self.useFixture(oslo_messaging.conffixture.ConfFixture(conf))
        conf.set_override("notification_driver", "messaging")
        if not exchange:
            exchange = 'ceilometer'
        conf.set_override("control_exchange", exchange)

        # NOTE(sileht): Ensure a new oslo.messaging driver is loaded
        # between each test
        self.transport = messaging.get_transport("fake://", cache=False)
        self.useFixture(mockpatch.Patch(
            'ceilometer.messaging.get_transport',
            return_value=self.transport))

    def assertTimestampEqual(self, first, second, msg=None):
        """Checks that two timestamps are equal.

        This relies on assertAlmostEqual to avoid rounding problems, and
        only checks to approximately microsecond precision.
        """
        return self.assertAlmostEqual(
            timeutils.delta_seconds(first, second),
            0.0,
            places=5)

    def assertIsEmpty(self, obj):
        try:
            if len(obj) != 0:
                self.fail("%s is not empty" % type(obj))
        except (TypeError, AttributeError):
            self.fail("%s doesn't have length" % type(obj))

    def assertIsNotEmpty(self, obj):
        try:
            if len(obj) == 0:
                self.fail("%s is empty" % type(obj))
        except (TypeError, AttributeError):
            self.fail("%s doesn't have length" % type(obj))

    @staticmethod
    def path_get(project_file=None):
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..', '..'))
        if project_file:
            return os.path.join(root, project_file)
        else:
            return root


def _skip_decorator(func):
    @functools.wraps(func)
    def skip_if_not_implemented(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ceilometer.NotImplementedError as e:
            raise testcase.TestSkipped(six.text_type(e))
        except webtest.app.AppError as e:
            if 'not implemented' in six.text_type(e):
                raise testcase.TestSkipped(six.text_type(e))
            raise
    return skip_if_not_implemented


class SkipNotImplementedMeta(type):
    def __new__(cls, name, bases, local):
        for attr in local:
            value = local[attr]
            if callable(value) and (
                    attr.startswith('test_') or attr == 'setUp'):
                local[attr] = _skip_decorator(value)
        return type.__new__(cls, name, bases, local)
ceilometer-6.0.0/ceilometer/tests/db.py0000664000567000056710000002116312701406223021237 0ustar jenkinsjenkins00000000000000
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests.""" import os import uuid import warnings import fixtures import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch import six from six.moves.urllib import parse as urlparse import sqlalchemy from testtools import testcase from ceilometer import storage from ceilometer.tests import base as test_base try: from ceilometer.tests import mocks except ImportError: mocks = None # happybase module is not Python 3 compatible yet class MongoDbManager(fixtures.Fixture): def __init__(self, url): self._url = url def setUp(self): super(MongoDbManager, self).setUp() with warnings.catch_warnings(): warnings.filterwarnings( action='ignore', message='.*you must provide a username and password.*') try: self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') except storage.StorageBadVersion as e: raise testcase.TestSkipped(six.text_type(e)) @property def url(self): return '%(url)s_%(db)s' % { 'url': self._url, 'db': uuid.uuid4().hex } class SQLManager(fixtures.Fixture): def __init__(self, url): db_name = 'ceilometer_%s' % uuid.uuid4().hex engine = sqlalchemy.create_engine(url) conn = engine.connect() self._create_database(conn, db_name) conn.close() engine.dispose() parsed = list(urlparse.urlparse(url)) parsed[2] = '/' + db_name self.url = urlparse.urlunparse(parsed) def setUp(self): super(SQLManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') class PgSQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.connection.set_isolation_level(0) conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name) conn.connection.set_isolation_level(1) class MySQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.execute('CREATE DATABASE %s;' % db_name) class ElasticSearchManager(fixtures.Fixture): def __init__(self, url): self.url = url def setUp(self): super(ElasticSearchManager, self).setUp() self.connection = storage.get_connection( 'sqlite://', 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') # prefix each test with unique index name self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex # force index on write so data is queryable right away self.event_connection._refresh_on_write = True class HBaseManager(fixtures.Fixture): def __init__(self, url): self._url = url def setUp(self): super(HBaseManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table data_prefix = str(uuid.uuid4().hex) def table(conn, name): return mocks.MockHBaseTable(name, conn, data_prefix) # Mock only real HBase connection, MConnection "table" method # stays origin. mock.patch('happybase.Connection.table', new=table).start() # We shouldn't delete data and tables after each test, # because it last for too long. 
# All tests tables will be deleted in setup-test-env.sh mock.patch("happybase.Connection.disable_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.delete_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.create_table", new=mock.MagicMock()).start() @property def url(self): return '%s?table_prefix=%s&table_prefix_separator=%s' % ( self._url, os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"), os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_") ) class SQLiteManager(fixtures.Fixture): def __init__(self, url): self.url = url def setUp(self): super(SQLiteManager, self).setUp() self.connection = storage.get_connection( self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( self.url, 'ceilometer.event.storage') @six.add_metaclass(test_base.SkipNotImplementedMeta) class TestBase(test_base.BaseTestCase): DRIVER_MANAGERS = { 'mongodb': MongoDbManager, 'mysql': MySQLManager, 'postgresql': PgSQLManager, 'db2': MongoDbManager, 'sqlite': SQLiteManager, 'es': ElasticSearchManager, } if mocks is not None: DRIVER_MANAGERS['hbase'] = HBaseManager def setUp(self): super(TestBase, self).setUp() db_url = os.environ.get('OVERTEST_URL', "sqlite://").replace( "mysql://", "mysql+pymysql://") engine = urlparse.urlparse(db_url).scheme # in case some drivers have additional specification, for example: # PyMySQL will have scheme mysql+pymysql engine = engine.split('+')[0] # NOTE(Alexei_987) Shortcut to skip expensive db setUp test_method = self._get_test_method() if (hasattr(test_method, '_run_with') and engine not in test_method._run_with): raise testcase.TestSkipped( 'Test is not applicable for %s' % engine) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([], project='ceilometer', validate_default_values=True) manager = self.DRIVER_MANAGERS.get(engine) if not manager: self.skipTest("missing driver manager: %s" % engine) self.db_manager = manager(db_url) self.useFixture(self.db_manager) self.conn = self.db_manager.connection self.conn.upgrade() self.event_conn = self.db_manager.event_connection self.event_conn.upgrade() self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection', side_effect=self._get_connection)) # Set a default location for the pipeline config file so the # tests work even if ceilometer is not installed globally on # the system. self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline') self.CONF.set_override( 'pipeline_cfg_file', self.path_get('etc/ceilometer/pipeline.yaml') ) def tearDown(self): self.event_conn.clear() self.event_conn = None self.conn.clear() self.conn = None super(TestBase, self).tearDown() def _get_connection(self, url, namespace): if namespace == "ceilometer.event.storage": return self.event_conn return self.conn def run_with(*drivers): """Used to mark tests that are only applicable for certain db driver. Skips test if driver is not available. """ def decorator(test): if isinstance(test, type) and issubclass(test, TestBase): # Decorate all test methods for attr in dir(test): value = getattr(test, attr) if callable(value) and attr.startswith('test_'): if six.PY3: value._run_with = drivers else: value.__func__._run_with = drivers else: test._run_with = drivers return test return decorator ceilometer-6.0.0/ceilometer/tests/mocks.py0000664000567000056710000000667412701406223022000 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import happybase class MockHBaseTable(happybase.Table): def __init__(self, name, connection, data_prefix): # data_prefix is added to all rows which are written # in this test. It allows to divide data from different tests self.data_prefix = data_prefix # We create happybase Table with prefix from # CEILOMETER_TEST_HBASE_TABLE_PREFIX prefix = os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", 'test') separator = os.getenv( "CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", '_') super(MockHBaseTable, self).__init__( "%s%s%s" % (prefix, separator, name), connection) def put(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).put(row, *args, **kwargs) def scan(self, row_start=None, row_stop=None, row_prefix=None, columns=None, filter=None, timestamp=None, include_timestamp=False, batch_size=10, scan_batching=None, limit=None, sorted_columns=False): # Add data prefix for row parameters # row_prefix could not be combined with row_start or row_stop if not row_start and not row_stop: row_prefix = self.data_prefix + (row_prefix or "") row_start = None row_stop = None elif row_start and not row_stop: # Adding data_prefix to row_start and row_stop does not work # if it looks like row_start = %data_prefix%foo, # row_stop = %data_prefix, because row_start > row_stop filter = self._update_filter_row(filter) row_start = self.data_prefix + row_start else: row_start = self.data_prefix + (row_start or "") row_stop = self.data_prefix + (row_stop or "") gen = super(MockHBaseTable, self).scan(row_start, row_stop, row_prefix, columns, filter, timestamp, include_timestamp, batch_size, scan_batching, limit, sorted_columns) data_prefix_len = len(self.data_prefix) # Restore original row format for row, data in gen: yield (row[data_prefix_len:], data) def row(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).row(row, *args, **kwargs) def delete(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).delete(row, *args, **kwargs) def _update_filter_row(self, filter): if filter: return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter) else: return "PrefixFilter(%s)" % self.data_prefix ceilometer-6.0.0/ceilometer/tests/unit/0000775000567000056710000000000012701406364021262 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/test_coordination.py0000664000567000056710000002441412701406223025362 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
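# NOTE: sketch of how MockHBaseTable (mocks.py above) namespaces rows so
# that parallel tests can safely share one physical HBase table; the
# table name and prefix value here are hypothetical:
#
#     table = MockHBaseTable('meter', conn, data_prefix='a1b2')
#     table.put('resource-1', {'f:q': 'v'})  # stored as row 'a1b2resource-1'
#     table.row('resource-1')                # prefix added on read...
#     for row, data in table.scan():         # ...and stripped from results
#         pass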
import logging import mock from oslo_config import fixture as fixture_config import tooz.coordination from ceilometer import coordination from ceilometer.tests import base from ceilometer import utils class MockToozCoordinator(object): def __init__(self, member_id, shared_storage): self._member_id = member_id self._groups = shared_storage self.is_started = False def start(self): self.is_started = True def stop(self): pass def heartbeat(self): pass def create_group(self, group_id): if group_id in self._groups: return MockAsyncError( tooz.coordination.GroupAlreadyExist(group_id)) self._groups[group_id] = {} return MockAsyncResult(None) def join_group(self, group_id, capabilities=b''): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) if self._member_id in self._groups[group_id]: return MockAsyncError( tooz.coordination.MemberAlreadyExist(group_id, self._member_id)) self._groups[group_id][self._member_id] = { "capabilities": capabilities, } return MockAsyncResult(None) def leave_group(self, group_id): return MockAsyncResult(None) def get_members(self, group_id): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) return MockAsyncResult(self._groups[group_id]) class MockToozCoordExceptionRaiser(MockToozCoordinator): def start(self): raise tooz.coordination.ToozError('error') def heartbeat(self): raise tooz.coordination.ToozError('error') def join_group(self, group_id, capabilities=b''): raise tooz.coordination.ToozError('error') def get_members(self, group_id): raise tooz.coordination.ToozError('error') class MockToozCoordExceptionOnJoinRaiser(MockToozCoordinator): def __init__(self, member_id, shared_storage, retry_count=None): super(MockToozCoordExceptionOnJoinRaiser, self).__init__(member_id, shared_storage) self.tooz_error_count = retry_count self.count = 0 def join_group(self, group_id, capabilities=b''): if self.count == self.tooz_error_count: return MockAsyncResult(None) else: self.count += 1 raise tooz.coordination.ToozError('error') class MockAsyncResult(tooz.coordination.CoordAsyncResult): def __init__(self, result): self.result = result def get(self, timeout=0): return self.result @staticmethod def done(): return True class MockAsyncError(tooz.coordination.CoordAsyncResult): def __init__(self, error): self.error = error def get(self, timeout=0): raise self.error @staticmethod def done(): return True class MockLoggingHandler(logging.Handler): """Mock logging handler to check for expected logs.""" def __init__(self, *args, **kwargs): self.reset() logging.Handler.__init__(self, *args, **kwargs) def emit(self, record): self.messages[record.levelname.lower()].append(record.getMessage()) def reset(self): self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [], 'critical': []} class TestPartitioning(base.BaseTestCase): def setUp(self): super(TestPartitioning, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.str_handler = MockLoggingHandler() coordination.LOG.logger.addHandler(self.str_handler) self.shared_storage = {} def _get_new_started_coordinator(self, shared_storage, agent_id=None, coordinator_cls=None, retry_count=None): coordinator_cls = coordinator_cls or MockToozCoordinator self.CONF.set_override('backend_url', 'xxx://yyy', group='coordination') with mock.patch('tooz.coordination.get_coordinator', lambda _, member_id: coordinator_cls(member_id, shared_storage, retry_count) if retry_count else coordinator_cls(member_id, shared_storage)): pc = 
coordination.PartitionCoordinator(agent_id) pc.start() return pc def _usage_simulation(self, *agents_kwargs): partition_coordinators = [] for kwargs in agents_kwargs: partition_coordinator = self._get_new_started_coordinator( self.shared_storage, kwargs['agent_id'], kwargs.get( 'coordinator_cls')) partition_coordinator.join_group(kwargs['group_id']) partition_coordinators.append(partition_coordinator) for i, kwargs in enumerate(agents_kwargs): all_resources = kwargs.get('all_resources', []) expected_resources = kwargs.get('expected_resources', []) actual_resources = partition_coordinators[i].extract_my_subset( kwargs['group_id'], all_resources) self.assertEqual(expected_resources, actual_resources) def test_single_group(self): agents = [dict(agent_id='agent1', group_id='group'), dict(agent_id='agent2', group_id='group')] self._usage_simulation(*agents) self.assertEqual(['group'], sorted(self.shared_storage.keys())) self.assertEqual(['agent1', 'agent2'], sorted(self.shared_storage['group'].keys())) def test_multiple_groups(self): agents = [dict(agent_id='agent1', group_id='group1'), dict(agent_id='agent2', group_id='group2')] self._usage_simulation(*agents) self.assertEqual(['group1', 'group2'], sorted(self.shared_storage.keys())) def test_partitioning(self): all_resources = ['resource_%s' % i for i in range(1000)] agents = ['agent_%s' % i for i in range(10)] expected_resources = [list() for _ in range(len(agents))] hr = utils.HashRing(agents) for r in all_resources: key = agents.index(hr.get_node(r)) expected_resources[key].append(r) agents_kwargs = [] for i, agent in enumerate(agents): agents_kwargs.append(dict(agent_id=agent, group_id='group', all_resources=all_resources, expected_resources=expected_resources[i])) self._usage_simulation(*agents_kwargs) def test_coordination_backend_offline(self): agents = [dict(agent_id='agent1', group_id='group', all_resources=['res1', 'res2'], expected_resources=[], coordinator_cls=MockToozCoordExceptionRaiser)] self._usage_simulation(*agents) expected_errors = ['Error getting group membership info from ' 'coordination backend.', 'Error connecting to coordination backend.'] for e in expected_errors: self.assertIn(e, self.str_handler.messages['error']) def test_coordination_backend_connection_fail_on_join(self): coord = self._get_new_started_coordinator( {'group'}, 'agent1', MockToozCoordExceptionOnJoinRaiser, retry_count=2) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionOnJoinRaiser): coord.join_group(group_id='group') expected_errors = ['Error joining partitioning group group,' ' re-trying', 'Error joining partitioning group group,' ' re-trying'] self.assertEqual(expected_errors, self.str_handler.messages['error']) def test_reconnect(self): coord = self._get_new_started_coordinator({}, 'a', MockToozCoordExceptionRaiser) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionRaiser('a', {})): coord.heartbeat() expected_errors = ['Error connecting to coordination backend.', 'Error sending a heartbeat to coordination ' 'backend.'] for e in expected_errors: self.assertIn(e, self.str_handler.messages['error']) self.str_handler.messages['error'] = [] with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordinator('a', {})): coord.heartbeat() for e in expected_errors: self.assertNotIn(e, self.str_handler.messages['error']) def test_group_id_none(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) with 
mock.patch.object(coord._coordinator, 'join_group') as mocked: coord.join_group(None) self.assertEqual(0, mocked.call_count) with mock.patch.object(coord._coordinator, 'leave_group') as mocked: coord.leave_group(None) self.assertEqual(0, mocked.call_count) def test_stop(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) coord.join_group("123") coord.stop() self.assertIsEmpty(coord._groups) self.assertIsNone(coord._coordinator) ceilometer-6.0.0/ceilometer/tests/unit/meter/0000775000567000056710000000000012701406364022376 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/meter/__init__.py0000664000567000056710000000000012701406223024467 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/meter/test_notifications.py0000664000567000056710000007673512701406223026674 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer.meter.notifications """ import copy import mock import os import six import yaml from oslo_config import fixture as fixture_config from oslo_utils import encodeutils from oslo_utils import fileutils import ceilometer from ceilometer import declarative from ceilometer.meter import notifications from ceilometer import service as ceilometer_service from ceilometer.tests import base as test NOTIFICATION = { 'event_type': u'test.create', 'timestamp': u'2015-06-1909: 19: 35.786893', 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', u'timestamp': u'2015-06-19T09:19:35.785330', u'created_at': u'2015-06-19T09:25:35.785330', u'launched_at': u'2015-06-19T09:25:40.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', }, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', 'publisher_id': "foo123" } MIDDLEWARE_EVENT = { u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'_context_quota_class': None, u'event_type': u'objectstore.http.request', u'_context_service_catalog': [], u'_context_auth_token': None, u'_context_user_id': None, u'priority': u'INFO', u'_context_is_admin': True, u'_context_user': None, u'publisher_id': u'ceilometermiddleware', u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'_context_remote_address': None, u'_context_roles': [], u'timestamp': u'2013-07-29 06:51:34.474815', u'_context_timestamp': u'2013-07-29T06:51:34.348091', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', u'_context_project_name': None, u'_context_read_deleted': u'no', u'_context_tenant': None, u'_context_instance_lock_checked': False, u'_context_project_id': None, u'_context_user_name': None, u'payload': { 'typeURI': 'http: 
//schemas.dmtf.org/cloud/audit/1.0/event', 'eventTime': '2015-01-30T16: 38: 43.233621', 'target': { 'action': 'get', 'typeURI': 'service/storage/object', 'id': 'account', 'metadata': { 'path': '/1.0/CUSTOM_account/container/obj', 'version': '1.0', 'container': 'container', 'object': 'obj' } }, 'observer': { 'id': 'target' }, 'eventType': 'activity', 'measurements': [ { 'metric': { 'metricId': 'openstack: uuid', 'name': 'storage.objects.outgoing.bytes', 'unit': 'B' }, 'result': 28 }, { 'metric': { 'metricId': 'openstack: uuid2', 'name': 'storage.objects.incoming.bytes', 'unit': 'B' }, 'result': 1 } ], 'initiator': { 'typeURI': 'service/security/account/user', 'project_id': None, 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' }, 'action': 'read', 'outcome': 'success', 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' } } FULL_MULTI_MSG = { u'_context_domain': None, u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'event_type': u'full.sample', 'timestamp': u'2015-06-1909: 19: 35.786893', u'_context_auth_token': None, u'_context_read_only': False, 'payload': [{ u'counter_name': u'instance1', u'user_id': u'user1', u'resource_id': u'res1', u'counter_unit': u'ns', u'counter_volume': 28.0, u'project_id': u'proj1', u'counter_type': u'gauge' }, { u'counter_name': u'instance2', u'user_id': u'user2', u'resource_id': u'res2', u'counter_unit': u'%', u'counter_volume': 1.0, u'project_id': u'proj2', u'counter_type': u'delta' }], u'_context_resource_uuid': None, u'_context_user_identity': u'fake_user_identity---', u'_context_show_deleted': False, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', 'priority': 'info', u'_context_is_admin': True, u'_context_project_domain': None, u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', u'_context_user_domain': None, 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } METRICS_UPDATE = { u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'_context_quota_class': None, u'event_type': u'compute.metrics.update', u'_context_service_catalog': [], u'_context_auth_token': None, u'_context_user_id': None, u'payload': { u'metrics': [ {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.frequency', 'value': 1600, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.time', 'value': 17421440000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.time', 'value': 7852600000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.time', 'value': 1307374400000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.time', 'value': 11697470000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.percent', 'value': 0.012959045637294348, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.percent', 'value': 0.027501485834103515, 'source': 'libvirt.LibvirtDriver'}], u'nodename': 
u'tianst.sh.intel.com', u'host': u'tianst', u'host_id': u'10.0.1.1'}, u'priority': u'INFO', u'_context_is_admin': True, u'_context_user': None, u'publisher_id': u'compute.tianst.sh.intel.com', u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'_context_remote_address': None, u'_context_roles': [], u'timestamp': u'2013-07-29 06:51:34.474815', u'_context_timestamp': u'2013-07-29T06:51:34.348091', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', u'_context_project_name': None, u'_context_read_deleted': u'no', u'_context_tenant': None, u'_context_instance_lock_checked': False, u'_context_project_id': None, u'_context_user_name': None } class TestMeterDefinition(test.BaseTestCase): def test_config_definition(self): cfg = dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id") handler = notifications.MeterDefinition(cfg, mock.Mock()) self.assertTrue(handler.match_type("test.create")) sample = list(handler.to_samples(NOTIFICATION))[0] self.assertEqual(1.0, sample["volume"]) self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", sample["resource_id"]) self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", sample["project_id"]) def test_config_required_missing_fields(self): cfg = dict() try: notifications.MeterDefinition(cfg, mock.Mock()) except declarative.DefinitionException as e: self.assertEqual("Required fields ['name', 'type', 'event_type'," " 'unit', 'volume', 'resource_id']" " not specified", encodeutils.exception_to_unicode(e)) def test_bad_type_cfg_definition(self): cfg = dict(name="test", type="foo", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23") try: notifications.MeterDefinition(cfg, mock.Mock()) except declarative.DefinitionException as e: self.assertEqual("Invalid type foo specified", encodeutils.exception_to_unicode(e)) class TestMeterProcessing(test.BaseTestCase): def setUp(self): super(TestMeterProcessing, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf ceilometer_service.prepare_service(argv=[], config_files=[]) self.handler = notifications.ProcessMeterNotifications(mock.Mock()) def test_fallback_meter_path(self): self.CONF.set_override('meter_definitions_cfg_file', '/not/existing/path', group='meter') with mock.patch('ceilometer.declarative.open', mock.mock_open(read_data='---\nmetric: []'), create=True) as mock_open: self.handler._load_definitions() if six.PY3: path = os.path.dirname(ceilometer.__file__) else: path = "ceilometer" mock_open.assert_called_with(path + "/meter/data/meters.yaml") def _load_meter_def_file(self, cfg): if six.PY3: cfg = cfg.encode('utf-8') meter_cfg_file = fileutils.write_to_tempfile(content=cfg, prefix="meters", suffix="yaml") self.CONF.set_override('meter_definitions_cfg_file', meter_cfg_file, group='meter') self.handler.definitions = self.handler._load_definitions() @mock.patch('ceilometer.meter.notifications.LOG') def test_bad_meter_definition_skip(self, LOG): cfg = yaml.dump( {'metric': [dict(name="good_test_1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test_2", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23"), dict(name="good_test_3", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", 
project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) self.assertEqual(2, len(self.handler.definitions)) LOG.error.assert_called_with( "Error loading meter definition : " "Invalid type bad_type specified") def test_jsonpath_values_parsed(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('test1', s1['name']) self.assertEqual(1.0, s1['volume']) self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) def test_multiple_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) data = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) def test_unmatched_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(0, len(c)) def test_regex_match_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) def test_default_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['timestamp'], s1['timestamp']) def test_custom_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name", timestamp='$.payload.eventTime')]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], s1['timestamp']) def test_custom_timestamp_expr_meter(self): cfg = yaml.dump( {'metric': 
[dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename", timestamp="$.payload.metrics" "[?(@.name='cpu.frequency')].timestamp")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual("2013-07-29T06:51:34.472416", s1['timestamp']) def test_default_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = NOTIFICATION['payload'].copy() meta['host'] = NOTIFICATION['publisher_id'] meta['event_type'] = NOTIFICATION['event_type'] self.assertEqual(meta, s1['resource_metadata']) def test_datetime_plugin(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="gauge", unit="sec", volume={"fields": ["$.payload.created_at", "$.payload.launched_at"], "plugin": "timedelta"}, resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(5.0, s1['volume']) def test_custom_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", metadata={'proj': '$.payload.project_id', 'dict': '$.payload.resource_metadata'})]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'proj': s1['project_id'], 'dict': NOTIFICATION['payload']['resource_metadata']} self.assertEqual(meta, s1['resource_metadata']) def test_multi_match_event_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(2, len(c)) def test_multi_meter_payload(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "volume", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(MIDDLEWARE_EVENT)) self.assertEqual(2, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) s2 = c[1].as_dict() self.assertEqual('storage.objects.incoming.bytes', s2['name']) self.assertEqual(1, s2['volume']) self.assertEqual('B', s2['unit']) def test_multi_meter_payload_single(self): event = 
copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) def test_multi_meter_payload_none(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) def test_multi_meter_payload_all_multi(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.[*].counter_name", event_type="full.sample", type="$.payload.[*].counter_type", unit="$.payload.[*].counter_unit", volume="$.payload.[*].counter_volume", resource_id="$.payload.[*].resource_id", project_id="$.payload.[*].project_id", user_id="$.payload.[*].user_id", lookup=['name', 'type', 'unit', 'volume', 'resource_id', 'project_id', 'user_id'])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(FULL_MULTI_MSG)) self.assertEqual(2, len(c)) msg = FULL_MULTI_MSG['payload'] for idx, val in enumerate(c): s1 = val.as_dict() self.assertEqual(msg[idx]['counter_name'], s1['name']) self.assertEqual(msg[idx]['counter_volume'], s1['volume']) self.assertEqual(msg[idx]['counter_unit'], s1['unit']) self.assertEqual(msg[idx]['counter_type'], s1['type']) self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) self.assertEqual(msg[idx]['project_id'], s1['project_id']) self.assertEqual(msg[idx]['user_id'], s1['user_id']) @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_missing(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] del event['payload']['measurements'][1]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 0 fetched meters contain ' '"volume" field instead of 2.') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_short(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", 
project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 1 fetched meters contain ' '"volume" field instead of 2.') def test_arithmetic_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.percent', event_type="compute.metrics.update", type='gauge', unit="percent", volume="$.payload.metrics[" "?(@.name='cpu.percent')].value" " * 100", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.percent', s1['name']) self.assertEqual(2.7501485834103514, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_string_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_prefix_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("prefix-tianst.sh.intel.com", s1['resource_id']) def test_duplicate_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.process_notification(NOTIFICATION)) self.assertEqual(1, len(c)) ceilometer-6.0.0/ceilometer/tests/unit/meter/test_meter_plugins.py0000664000567000056710000000617312701406223026665 0ustar jenkinsjenkins00000000000000# # Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock
from oslotest import base

from ceilometer.event import trait_plugins


class TestTimedeltaPlugin(base.BaseTestCase):

    def setUp(self):
        super(TestTimedeltaPlugin, self).setUp()
        self.plugin = trait_plugins.TimedeltaPlugin()

    def test_timedelta_transformation(self):
        match_list = [('test.timestamp1', '2016-03-02T15:04:32'),
                      ('test.timestamp2', '2016-03-02T16:04:32')]
        value = self.plugin.trait_value(match_list)
        self.assertEqual(3600, value)

    def test_timedelta_missing_field(self):
        match_list = [('test.timestamp1', '2016-03-02T15:04:32')]
        with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log:
            self.assertIsNone(self.plugin.trait_value(match_list))
            log.warning.assert_called_once_with(
                'Timedelta plugin is required two timestamp fields to create '
                'timedelta value.')

    def test_timedelta_exceed_field(self):
        match_list = [('test.timestamp1', '2016-03-02T15:04:32'),
                      ('test.timestamp2', '2016-03-02T16:04:32'),
                      ('test.timestamp3', '2016-03-02T16:10:32')]
        with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log:
            self.assertIsNone(self.plugin.trait_value(match_list))
            log.warning.assert_called_once_with(
                'Timedelta plugin is required two timestamp fields to create '
                'timedelta value.')

    def test_timedelta_invalid_timestamp(self):
        match_list = [('test.timestamp1', '2016-03-02T15:04:32'),
                      ('test.timestamp2', '2016-03-02T15:004:32')]
        with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log:
            self.assertIsNone(self.plugin.trait_value(match_list))
            msg = log.warning._mock_call_args[0][0]
            self.assertTrue(msg.startswith('Failed to parse date from set '
                                           'fields, both fields ')
                            )

    def test_timedelta_reverse_timestamp_order(self):
        match_list = [('test.timestamp1', '2016-03-02T15:15:32'),
                      ('test.timestamp2', '2016-03-02T15:10:32')]
        value = self.plugin.trait_value(match_list)
        self.assertEqual(300, value)

    def test_timedelta_precise_difference(self):
        match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'),
                      ('test.timestamp2', '2016-03-02T15:10:32.786899')]
        value = self.plugin.trait_value(match_list)
        self.assertEqual(0.000006, value)
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/0000775000567000056710000000000012701406364022220 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/notifications/0000775000567000056710000000000012701406364025071 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py0000664000567000056710000007274512701406223030442 0ustar jenkinsjenkins00000000000000#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Sample data for test_ipmi.

This data is provided as a sample of the data expected from the ipmitool
driver in the Ironic project, which is the publisher of the notifications
being tested.
""" TEMPERATURE_DATA = { 'DIMM GH VR Temp (0x3b)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.6 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM GH VR Temp (0x3b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU1 VR Temp (0x36)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '32 (+/- 0.500) degrees C', 'Entity ID': '20.1 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU1 VR Temp (0x36)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM EF VR Temp (0x3a)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.5 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM EF VR Temp (0x3a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU2 VR Temp (0x37)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '31 (+/- 0.500) degrees C', 'Entity ID': '20.2 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU2 VR Temp (0x37)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'Ambient Temp (0x32)': { 'Status': 'ok', 'Sensor Reading': '25 (+/- 0) degrees C', 'Entity ID': '12.1 (Front Panel Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Upper non-critical': '43.000', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Upper non-recoverable': '50.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative 
Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '46.000', 'Sensor ID': 'Ambient Temp (0x32)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'Mezz Card Temp (0x35)': { 'Status': 'Disabled', 'Sensor Reading': 'Disabled', 'Entity ID': '44.1 (I/O Module)', 'Event Message Control': 'Per-threshold', 'Upper non-critical': '70.000', 'Upper non-recoverable': '85.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'Mezz Card Temp (0x35)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'PCH Temp (0x3c)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '46 (+/- 0.500) degrees C', 'Entity ID': '45.1 (Processor/IO Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '93.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '103.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '98.000', 'Sensor ID': 'PCH Temp (0x3c)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM CD VR Temp (0x39)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '27 (+/- 0.500) degrees C', 'Entity ID': '20.4 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM CD VR Temp (0x39)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 2 Temp (0x34)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '30 (+/- 0) degrees C', 'Entity ID': '16.2 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 2 Temp (0x34)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM AB VR Temp (0x38)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '28 (+/- 0.500) degrees C', 'Entity ID': '20.3 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 
'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM AB VR Temp (0x38)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '38 (+/- 0) degrees C', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } CURRENT_DATA = { 'Avg Power (0x2e)': { 'Status': 'ok', 'Sensor Reading': '130 (+/- 0) Watts', 'Entity ID': '21.0 (Power Management)', 'Assertions Enabled': '', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'No Thresholds', 'Positive Hysteresis': 'Unspecified', 'Sensor Type (Analog)': 'Current', 'Negative Hysteresis': 'Unspecified', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Avg Power (0x2e)', 'Assertion Events': '', 'Minimum sensor range': '2550.000', 'Settable Thresholds': 'No Thresholds' } } FAN_DATA = { 'Fan 4A Tach (0x46)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4A Tach (0x46)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 5A Tach (0x48)': { 'Status': 'ok', 'Sensor Reading': '7140 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5A Tach (0x48)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3A Tach (0x44)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3A Tach (0x44)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 1A Tach (0x40)': { 'Status': 'ok', 'Sensor Reading': '6960 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1A Tach (0x40)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3B Tach (0x45)': { 'Status': 'ok', 'Sensor Reading': '7104 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3B Tach (0x45)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2A Tach (0x42)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2A Tach (0x42)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 4B Tach (0x47)': { 'Status': 'ok', 'Sensor Reading': '7488 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4B Tach (0x47)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2B Tach (0x43)': { 'Status': 'ok', 'Sensor Reading': '7168 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2B Tach (0x43)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 5B Tach (0x49)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal 
Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5B Tach (0x49)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 1B Tach (0x41)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1B Tach (0x41)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6B Tach (0x4b)': { 'Status': 'ok', 'Sensor Reading': '7616 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6B Tach (0x4b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6A Tach (0x4a)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6A Tach (0x4a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' } } VOLTAGE_DATA = { 'Planar 12V (0x18)': { 'Status': 'ok', 'Sensor Reading': '12.312 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.108', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '10.692', 'Negative Hysteresis': '0.108', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '13.446', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 12V (0x18)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '12.042' }, 'Planar 3.3V (0x16)': { 'Status': 'ok', 'Sensor Reading': '3.309 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.028', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': 
'3.039', 'Negative Hysteresis': '0.028', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '3.564', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 3.3V (0x16)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.309' }, 'Planar VBAT (0x1c)': { 'Status': 'ok', 'Sensor Reading': '3.137 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lnc- lcr-', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Readable Thresholds': 'lcr lnc', 'Positive Hysteresis': '0.025', 'Deassertions Enabled': 'lnc- lcr-', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '2.095', 'Negative Hysteresis': '0.025', 'Lower non-critical': '2.248', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Planar VBAT (0x1c)', 'Settable Thresholds': 'lcr lnc', 'Threshold Read Mask': 'lcr lnc', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.010' }, 'Planar 5V (0x17)': { 'Status': 'ok', 'Sensor Reading': '5.062 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.045', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '4.475', 'Negative Hysteresis': '0.045', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '5.582', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 5V (0x17)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4.995' } } SENSOR_DATA = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': TEMPERATURE_DATA, 'Current': CURRENT_DATA, 'Fan': FAN_DATA, 'Voltage': VOLTAGE_DATA } } } EMPTY_PAYLOAD = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { } } } MISSING_SENSOR = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } BAD_SENSOR = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 
'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': 'some bad stuff', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } NO_SENSOR_ID = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', }, } } } } NO_NODE_ID = { 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '20140223134852', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', }, } } } } ceilometer-6.0.0/ceilometer/tests/unit/ipmi/notifications/__init__.py0000664000567000056710000000000012701406223027162 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/ipmi/notifications/test_ironic.py0000664000567000056710000002046712701406223027770 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for producing IPMI sample messages from notification events. """ import mock from oslotest import base from ceilometer.ipmi.notifications import ironic as ipmi from ceilometer import sample from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data class TestNotifications(base.BaseTestCase): def test_ipmi_temperature_notification(self): """Test IPMI Temperature sensor data. Based on the above ipmi_testdata the expected sample for a single temperature reading has:: * a resource_id composed from the node_uuid Sensor ID * a name composed from 'hardware.ipmi.' 
and 'temperature'
        * a volume from the first chunk of the Sensor Reading
        * a unit from the last chunk of the Sensor Reading
        * some readings are skipped if the value is 'Disabled'
        * metadata with the node id
        """
        processor = ipmi.TemperatureSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.SENSOR_DATA)])

        self.assertEqual(10, len(counters),
                         'expected 10 temperature readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(26.0, test_counter.volume)
        self.assertEqual('C', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.temperature', test_counter.name)
        self.assertEqual('hardware.ipmi.metrics.update',
                         test_counter.resource_metadata['event_type'])
        self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad',
                         test_counter.resource_metadata['node'])

    def test_ipmi_current_notification(self):
        """Test IPMI Current sensor data.

        A single current reading is effectively the same as temperature,
        modulo "current".
        """
        processor = ipmi.CurrentSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.SENSOR_DATA)])

        self.assertEqual(1, len(counters), 'expected 1 current reading')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(130.0, test_counter.volume)
        self.assertEqual('W', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.current', test_counter.name)

    def test_ipmi_fan_notification(self):
        """Test IPMI Fan sensor data.

        A single fan reading is effectively the same as temperature,
        modulo "fan".
        """
        processor = ipmi.FanSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.SENSOR_DATA)])

        self.assertEqual(12, len(counters), 'expected 12 fan readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(6900.0, test_counter.volume)
        self.assertEqual('RPM', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.fan', test_counter.name)

    def test_ipmi_voltage_notification(self):
        """Test IPMI Voltage sensor data.

        A single voltage reading is effectively the same as temperature,
        modulo "voltage".
        """
        processor = ipmi.VoltageSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.SENSOR_DATA)])

        self.assertEqual(4, len(counters), 'expected 4 voltage readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)'
        )
        test_counter = counters[resource_id]
        self.assertEqual(3.137, test_counter.volume)
        self.assertEqual('V', test_counter.unit)
        self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
        self.assertEqual('hardware.ipmi.voltage', test_counter.name)

    def test_disabled_skips_metric(self):
        """Test that a meter with a disabled volume is skipped."""
        processor = ipmi.TemperatureSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.SENSOR_DATA)])

        self.assertEqual(10, len(counters),
                         'expected 10 temperature readings')
        resource_id = (
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)'
        )
        self.assertNotIn(resource_id, counters)

    def test_empty_payload_no_metrics_success(self):
        processor = ipmi.TemperatureSensorNotification(None)
        counters = dict([(counter.resource_id, counter)
                         for counter in processor.process_notification(
                             ipmi_test_data.EMPTY_PAYLOAD)])

        self.assertEqual(0, len(counters), 'expected 0 readings')

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_sensor_data(self, mylog):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.MISSING_SENSOR))

        self.assertEqual(
            'invalid sensor data for '
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
            "missing 'Sensor Reading' in payload",
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_sensor_data_malformed(self, mylog):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.BAD_SENSOR))

        self.assertEqual(
            'invalid sensor data for '
            'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
            'unable to parse sensor reading: some bad stuff',
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_node_uuid(self, mylog):
        """Test for desired error message when 'node_uuid' missing.

        Presumably this will never happen given the way the data is
        created, but better defensive than dead.
        """
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_NODE_ID))

        self.assertEqual(
            'invalid sensor data for missing id: missing key in payload: '
            "'node_uuid'",
            messages[0]
        )

    @mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
    def test_missing_sensor_id(self, mylog):
        """Test for desired error message when 'Sensor ID' missing."""
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID))

        self.assertEqual(
            'invalid sensor data for missing id: missing key in payload: '
            "'Sensor ID'",
            messages[0]
        )
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/0000775000567000056710000000000012701406364024044 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/fake_utils.py0000664000567000056710000000747212701406223026550 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import binascii

from ceilometer.ipmi.platform import exception as nmexcept
from ceilometer.ipmi.platform import intel_node_manager as node_manager
from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data


def get_sensor_status_init(parameter=''):
    return (' 01\n', '')


def get_sensor_status_uninit(parameter=''):
    return (' 00\n', '')


def init_sensor_agent(parameter=''):
    return (' 00\n', '')


def get_nm_version_v2(parameter=''):
    return test_data.nm_version_v2


def get_nm_version_v3(parameter=''):
    return test_data.nm_version_v3


def sdr_dump(data_file=''):
    if data_file == '':
        raise ValueError("No file specified for ipmitool sdr dump")

    fake_slave_address = '2c'
    fake_channel = '60'
    hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel
    data = binascii.unhexlify(hexstr)
    with open(data_file, 'wb') as bin_fp:
        bin_fp.write(data)

    return ('', '')


def _execute(funcs, *cmd, **kwargs):

    datas = {
        test_data.device_id_cmd: test_data.device_id,
        test_data.nm_device_id_cmd: test_data.nm_device_id,
        test_data.get_power_cmd: test_data.power_data,
        test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data,
        test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data,
        test_data.get_airflow_cmd: test_data.airflow_data,
        test_data.get_cups_index_cmd: test_data.cups_index_data,
        test_data.get_cups_util_cmd: test_data.cups_util_data,
        test_data.sdr_info_cmd: test_data.sdr_info,
        test_data.read_sensor_temperature_cmd: test_data.sensor_temperature,
        test_data.read_sensor_voltage_cmd: test_data.sensor_voltage,
        test_data.read_sensor_current_cmd: test_data.sensor_current,
        test_data.read_sensor_fan_cmd: test_data.sensor_fan,
    }

    if cmd[1] == 'sdr' and cmd[2] == 'dump':
        # ipmitool sdr dump /tmp/XXXX
        cmd_str = "".join(cmd[:3])
        par_str = cmd[3]
    else:
        cmd_str = "".join(cmd)
        par_str = ''

    try:
        return datas[cmd_str]
    except KeyError:
        return funcs[cmd_str](par_str)


def execute_with_nm_v3(*cmd, **kwargs):
    """test version of execute on Node Manager V3.0 platform."""

    funcs = {test_data.sensor_status_cmd: get_sensor_status_init,
             test_data.init_sensor_cmd: init_sensor_agent,
             test_data.sdr_dump_cmd: sdr_dump,
             test_data.nm_version_cmd: get_nm_version_v3}
    return _execute(funcs, *cmd, **kwargs)


def execute_with_nm_v2(*cmd, **kwargs):
    """test version of execute on Node Manager V2.0 platform."""

    funcs = {test_data.sensor_status_cmd: get_sensor_status_init,
             test_data.init_sensor_cmd: init_sensor_agent,
             test_data.sdr_dump_cmd: sdr_dump,
             test_data.nm_version_cmd: get_nm_version_v2}
    return _execute(funcs, *cmd, **kwargs)


def execute_without_nm(*cmd, **kwargs):
    """test version of execute on Non-Node Manager platform."""

    funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit,
             test_data.init_sensor_cmd: init_sensor_agent,
             test_data.sdr_dump_cmd: sdr_dump}
    return _execute(funcs, *cmd, **kwargs)


def execute_without_ipmi(*cmd, **kwargs):
    raise nmexcept.IPMIException
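# NOTE(editor): illustrative usage sketch, not part of the original module.
# The fake executors above are meant to replace ceilometer.utils.execute via
# mock (see the test classes that follow). Dispatch keys are the argv pieces
# joined with no separator, so a call such as:
#
#     execute_with_nm_v2('ipmitool', 'raw', '0x0a', '0x2c', '0x00')
#
# builds the key 'ipmitoolraw0x0a0x2c0x00' (test_data.sensor_status_cmd),
# misses the static `datas` table, and falls through to
# get_sensor_status_init('') == (' 01\n', ''), mimicking the (stdout, stderr)
# pair the real ipmitool binary would produce.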
ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py0000664000567000056710000001075112701406223030002 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslotest import base

from ceilometer.ipmi.platform import ipmi_sensor
from ceilometer.tests.unit.ipmi.platform import fake_utils
from ceilometer import utils


class TestIPMISensor(base.BaseTestCase):

    def setUp(self):
        super(TestIPMISensor, self).setUp()

        utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2)
        self.ipmi = ipmi_sensor.IPMISensor()

    @classmethod
    def tearDownClass(cls):
        # reset inited to force an initialization of singleton for next test
        ipmi_sensor.IPMISensor()._inited = False
        super(TestIPMISensor, cls).tearDownClass()

    def test_read_sensor_temperature(self):
        sensors = self.ipmi.read_sensor_any('Temperature')

        self.assertTrue(self.ipmi.ipmi_support)
        # only temperature data returned.
        self.assertIn('Temperature', sensors)
        self.assertEqual(1, len(sensors))

        # 4 sensor data in total, ignore 1 without 'Sensor Reading'.
        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
        self.assertEqual(3, len(sensors['Temperature']))
        sensor = sensors['Temperature']['BB P1 VR Temp (0x20)']
        self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading'])

    def test_read_sensor_voltage(self):
        sensors = self.ipmi.read_sensor_any('Voltage')

        # only voltage data returned.
        self.assertIn('Voltage', sensors)
        self.assertEqual(1, len(sensors))

        # 4 sensor data in total, ignore 1 without 'Sensor Reading'.
        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
        self.assertEqual(3, len(sensors['Voltage']))
        sensor = sensors['Voltage']['BB +5.0V (0xd1)']
        self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading'])

    def test_read_sensor_current(self):
        sensors = self.ipmi.read_sensor_any('Current')

        # only Current data returned.
        self.assertIn('Current', sensors)
        self.assertEqual(1, len(sensors))

        # 2 sensor data in total.
        # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py
        self.assertEqual(2, len(sensors['Current']))
        sensor = sensors['Current']['PS1 Curr Out % (0x58)']
        self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading'])

    def test_read_sensor_fan(self):
        sensors = self.ipmi.read_sensor_any('Fan')

        # only Fan data returned.
        self.assertIn('Fan', sensors)
        self.assertEqual(1, len(sensors))

        # 4 sensor data in total.
# Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(4, len(sensors['Fan'])) sensor = sensors['Fan']['System Fan 2 (0x32)'] self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) class TestNonIPMISensor(base.BaseTestCase): def setUp(self): super(TestNonIPMISensor, self).setUp() utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super(TestNonIPMISensor, cls).tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertFalse(self.ipmi.ipmi_support) # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # Non-IPMI platform return empty data self.assertEqual({}, sensors) ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/__init__.py0000664000567000056710000000000012701406223026135 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py0000664000567000056710000001315412701406224031266 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
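# NOTE(editor): illustrative sketch only, not part of the original module.
# The assertions below feed node_manager._hex() lists of hex byte strings in
# little-endian order, as parsed from raw ipmitool output (see the comments
# in ipmitool_test_data.py, e.g. "cur- 57 00(87)"). Assuming that convention,
# the decoding amounts to:
#
#     def hex_le(byte_strs):
#         # ['26', '02'] -> 0x0226 -> 550
#         return sum(int(b, 16) << (8 * i) for i, b in enumerate(byte_strs))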
import abc import mock from oslotest import base import six from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer.tests.unit.ipmi.platform import fake_utils from ceilometer import utils @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractmethod def init_test_engine(self): """Prepare specific ipmitool as engine for different NM version.""" def setUp(self): super(_Base, self).setUp() self.init_test_engine() self.nm = node_manager.NodeManager() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test node_manager.NodeManager()._inited = False super(_Base, cls).tearDownClass() class TestNodeManagerV3(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) def test_read_airflow(self): airflow = self.nm.read_airflow() avg_val = node_manager._hex(airflow["Average_value"]) max_val = node_manager._hex(airflow["Maximum_value"]) min_val = node_manager._hex(airflow["Minimum_value"]) cur_val = node_manager._hex(airflow["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(190, cur_val) self.assertEqual(150, min_val) self.assertEqual(550, max_val) self.assertEqual(203, avg_val) def test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() avg_val = node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(25, cur_val) self.assertEqual(24, min_val) self.assertEqual(27, max_val) self.assertEqual(25, avg_val) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) mem_util = node_manager._hex(cups_util["Mem_Utilization"]) io_util = node_manager._hex(cups_util["IO_Utilization"]) # see ipmi_test_data.py for raw data self.assertEqual(51, cpu_util) self.assertEqual(5, mem_util) self.assertEqual(0, io_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() index = node_manager._hex(cups_index["CUPS_Index"]) self.assertEqual(46, index) class TestNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) def test_read_power_all(self): power = self.nm.read_power_all() avg_val = node_manager._hex(power["Average_value"]) max_val = node_manager._hex(power["Maximum_value"]) min_val = node_manager._hex(power["Minimum_value"]) cur_val = node_manager._hex(power["Current_value"]) # get NM 2.0 self.assertEqual(3, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(87, cur_val) self.assertEqual(3, min_val) self.assertEqual(567, max_val) self.assertEqual(92, avg_val) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() avg_val = node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # see ipmi_test_data.py for raw data self.assertEqual(23, cur_val) self.assertEqual(22, min_val) self.assertEqual(24, max_val) self.assertEqual(23, avg_val) def test_read_airflow(self): airflow = self.nm.read_airflow() self.assertEqual({}, airflow) def 
test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() self.assertEqual({}, temperature) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() self.assertEqual({}, cups_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() self.assertEqual({}, cups_index) class TestNonNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) def test_read_power_all(self): # no NM support self.assertEqual(0, self.nm.nm_version) power = self.nm.read_power_all() # Non-Node Manager platform return empty data self.assertEqual({}, power) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() # Non-Node Manager platform return empty data self.assertEqual({}, temperature) ceilometer-6.0.0/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py0000664000567000056710000003223312701406223030277 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_intel_node_manager and test_ipmi_sensor. This data is provided as a sample of the data expected from the ipmitool binary, which produce Node Manager/IPMI raw data """ sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Temperature Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB P1 VR Temp (0x20) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 25 (+/- 0) degrees C Status : ok Nominal Reading : 58.000 Normal Minimum : 10.000 Normal Maximum : 105.000 Upper critical : 115.000 Upper non-critical : 110.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : Front Panel Temp (0x21) Entity ID : 12.1 (Front Panel Board) Sensor Type (Analog) : Temperature Sensor Reading : 23 (+/- 0) degrees C Status : ok Nominal Reading : 28.000 Normal Minimum : 10.000 Normal Maximum : 45.000 Upper critical : 55.000 Upper non-critical : 50.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : SSB Temp (0x22) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 43 (+/- 0) degrees C Status : ok 
Nominal Reading : 52.000 Normal Minimum : 10.000 Normal Maximum : 93.000 Upper critical : 103.000 Upper non-critical : 98.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Voltage Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB +12.0V (0xd0) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 11.831 (+/- 0) Volts Status : ok Nominal Reading : 11.935 Normal Minimum : 11.363 Normal Maximum : 12.559 Upper critical : 13.391 Upper non-critical : 13.027 Lower critical : 10.635 Lower non-critical : 10.947 Positive Hysteresis : 0.052 Negative Hysteresis : 0.052 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +1.35 P1LV AB (0xe4) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : Disabled Status : Disabled Nominal Reading : 1.342 Normal Minimum : 1.275 Normal Maximum : 1.409 Upper critical : 1.488 Upper non-critical : 1.445 Lower critical : 1.201 Lower non-critical : 1.244 Positive Hysteresis : 0.006 Negative Hysteresis : 0.006 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Event Status : Unavailable Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +5.0V (0xd1) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 4.959 (+/- 0) Volts Status : ok Nominal Reading : 4.981 Normal Minimum : 4.742 Normal Maximum : 5.241 Upper critical : 5.566 Upper non-critical : 5.415 Lower critical : 4.416 Lower non-critical : 4.546 Positive Hysteresis : 0.022 Negative Hysteresis : 0.022 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) Entity ID : 10.1 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 11 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ Sensor ID : PS2 Curr Out 
% (0x59) Entity ID : 10.2 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 0 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ """ sensor_fan_data = """Sensor ID : System Fan 1 (0x30) Entity ID : 29.1 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 2 (0x32) Entity ID : 29.2 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 3 (0x34) Entity ID : 29.3 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 4 (0x36) Entity ID : 29.4 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4606 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- """ sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' sdr_dump_cmd = 'ipmitoolsdrdump' sdr_info_cmd = 'ipmitoolsdrinfo' read_sensor_all_cmd = 'ipmitoolsdr-v' read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' device_id_cmd = 'ipmitoolraw0x060x01' nm_device_id_cmd = 
'ipmitool-b0x6-t0x2craw0x060x01' nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') # start from byte 3, get cur- 57 00(87), min- 03 00(3) # max- 37 02(567), avg- 5c 00(92) power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' ' 9b 12 01 50\n', '') # start from byte 3, get cur- 17 00(23), min- 16 00(22) # max- 18 00(24), avg- 17 00(23) inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- 19 00(25), min- 18 00(24) # max- 1b 00(27), avg- 19 00(25) outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- be 00(190), min- 96 00(150) # max- 26 02(550), avg- cb 00(203) airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n' ' b7 02 00 50\n', '') # start from byte 3, cups index 2e 00 (46) cups_index_data = (' 57 01 00 2e 00\n', '') # start from byte 3, get cup_util - 33 00 ...(51), mem_util - 05 00 ...(5) # io_util - 00 00 ...(0) cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' ' 00 00 00 00 00 00 00 00 00 00 00\n', '') sdr_info = ('', '') sensor_temperature = (sensor_temperature_data, '') sensor_voltage = (sensor_voltage_data, '') sensor_current = (sensor_current_data, '') sensor_fan = (sensor_fan_data, '') ceilometer-6.0.0/ceilometer/tests/unit/ipmi/pollsters/0000775000567000056710000000000012701406364024247 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/ipmi/pollsters/__init__.py0000664000567000056710000000000012701406223026340 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/ipmi/pollsters/test_node.py0000664000567000056710000001144412701406223026603 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
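# NOTE: a minimal sketch of how the little-endian hex byte lists returned
# by the fake_data() helpers below decode to the integers asserted in
# _verify_metering(). The helper name is hypothetical and illustrative
# only; the real parsing lives in
# ceilometer.ipmi.platform.intel_node_manager.

def _decode_le_bytes(byte_strs):
    # Bytes arrive least-significant first, so reverse before joining,
    # e.g. ['13', '00'] -> '0013' -> 19 and ['2e', '00'] -> '002e' -> 46.
    return int(''.join(reversed(byte_strs)), 16)

assert _decode_le_bytes(['13', '00']) == 19   # power reading below
assert _decode_le_bytes(['2e', '00']) == 46   # CUPS index reading below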
import mock
from oslo_config import cfg

from ceilometer.ipmi.pollsters import node
from ceilometer.tests.unit.ipmi.pollsters import base

CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')


class TestPowerPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"Current_value": ['13', '00']}

    def make_pollster(self):
        return node.PowerPollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 19(0x13 as current_value)
        self._verify_metering(1, 19, CONF.host)


class TestInletTemperaturePollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"Current_value": ['23', '00']}

    def make_pollster(self):
        return node.InletTemperaturePollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 35(0x23 as current_value)
        self._verify_metering(1, 35, CONF.host)


class TestOutletTemperaturePollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"Current_value": ['25', '00']}

    def make_pollster(self):
        return node.OutletTemperaturePollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 37(0x25 as current_value)
        self._verify_metering(1, 37, CONF.host)


class TestAirflowPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"Current_value": ['be', '00']}

    def make_pollster(self):
        return node.AirflowPollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 190(0xbe as current_value)
        self._verify_metering(1, 190, CONF.host)


class TestCUPSIndexPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"CUPS_Index": ['2e', '00']}

    def make_pollster(self):
        return node.CUPSIndexPollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 46(0x2e)
        self._verify_metering(1, 46, CONF.host)


class CPUUtilPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"CPU_Utilization":
                ['33', '00', '00', '00', '00', '00', '00', '00']}

    def make_pollster(self):
        return node.CPUUtilPollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 51(0x33)
        self._verify_metering(1, 51, CONF.host)


class MemUtilPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"Mem_Utilization":
                ['05', '00', '00', '00', '00', '00', '00', '00']}

    def make_pollster(self):
        return node.MemUtilPollster()

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_get_samples(self):
        self._test_get_samples()
        # only one sample, and value is 5(0x05)
        self._verify_metering(1, 5, CONF.host)


class IOUtilPollster(base.TestPollsterBase):

    def fake_data(self):
        # data after parsing Intel Node Manager output
        return {"IO_Utilization":
                ['00', '00', '00', '00', '00', '00', '00', '00']}

    def make_pollster(self):
        return node.IOUtilPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 0(0x00) self._verify_metering(1, 0, CONF.host) ceilometer-6.0.0/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py0000664000567000056710000001007512701406223027166 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from ceilometer.ipmi.pollsters import sensor from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data from ceilometer.tests.unit.ipmi.pollsters import base CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') TEMPERATURE_SENSOR_DATA = { 'Temperature': ipmi_test_data.TEMPERATURE_DATA } CURRENT_SENSOR_DATA = { 'Current': ipmi_test_data.CURRENT_DATA } FAN_SENSOR_DATA = { 'Fan': ipmi_test_data.FAN_DATA } VOLTAGE_SENSOR_DATA = { 'Voltage': ipmi_test_data.VOLTAGE_DATA } MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] class TestTemperatureSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return TEMPERATURE_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(10, float(32), CONF.host) class TestMissingSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMalformedSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MALFORMED_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMissingSensorId(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_ID_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestFanSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return FAN_SENSOR_DATA def make_pollster(self): return sensor.FanSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(12, float(7140), CONF.host) class TestCurrentSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return CURRENT_SENSOR_DATA def make_pollster(self): return 
sensor.CurrentSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, float(130), CONF.host) class TestVoltageSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return VOLTAGE_SENSOR_DATA def make_pollster(self): return sensor.VoltageSensorPollster() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): self._test_get_samples() self._verify_metering(4, float(3.309), CONF.host) ceilometer-6.0.0/ceilometer/tests/unit/ipmi/pollsters/base.py0000664000567000056710000000500612701406223025526 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import mockpatch import six from ceilometer.agent import manager from ceilometer.tests import base @six.add_metaclass(abc.ABCMeta) class TestPollsterBase(base.BaseTestCase): def fake_data(self): """Fake data used for test.""" return None def fake_sensor_data(self, sensor_type): """Fake sensor data used for test.""" return None @abc.abstractmethod def make_pollster(self): """Produce right pollster for test.""" def _test_get_samples(self): nm = mock.Mock() nm.read_inlet_temperature.side_effect = self.fake_data nm.read_outlet_temperature.side_effect = self.fake_data nm.read_power_all.side_effect = self.fake_data nm.read_airflow.side_effect = self.fake_data nm.read_cups_index.side_effect = self.fake_data nm.read_cups_utilization.side_effect = self.fake_data nm.read_sensor_any.side_effect = self.fake_sensor_data # We should mock the pollster first before initialize the Manager # so that we don't trigger the sudo in pollsters' __init__(). self.useFixture(mockpatch.Patch( 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', return_value=nm)) self.useFixture(mockpatch.Patch( 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', return_value=nm)) self.mgr = manager.AgentManager(['ipmi']) self.pollster = self.make_pollster() def _verify_metering(self, length, expected_vol=None, node=None): cache = {} resources = ['local_host'] samples = list(self.pollster.get_samples(self.mgr, cache, resources)) self.assertEqual(length, len(samples)) if expected_vol: self.assertTrue(any(s.volume == expected_vol for s in samples)) if node: self.assertTrue(any(s.resource_metadata['node'] == node for s in samples)) ceilometer-6.0.0/ceilometer/tests/unit/ipmi/__init__.py0000664000567000056710000000000012701406223024311 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/0000775000567000056710000000000012701406364023410 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/test_http.py0000664000567000056710000001062712701406223026000 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid

import mock
from oslo_config import fixture as fixture_config
from oslotest import base
import requests

from ceilometer.dispatcher import http
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils


class TestDispatcherHttp(base.BaseTestCase):

    def setUp(self):
        super(TestDispatcherHttp, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.msg = {'counter_name': 'test',
                    'resource_id': self.id(),
                    'counter_volume': 1,
                    }
        self.msg['message_signature'] = utils.compute_signature(
            self.msg, self.CONF.publisher.telemetry_secret,
        )

    def test_http_dispatcher_config_options(self):
        self.CONF.dispatcher_http.target = 'fake'
        self.CONF.dispatcher_http.timeout = 2
        dispatcher = http.HttpDispatcher(self.CONF)

        self.assertEqual('fake', dispatcher.target)
        self.assertEqual(2, dispatcher.timeout)

    def test_http_dispatcher_with_no_target(self):
        self.CONF.dispatcher_http.target = ''
        dispatcher = http.HttpDispatcher(self.CONF)

        # The target should remain the empty string
        self.assertEqual('', dispatcher.target)

        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_metering_data(self.msg)

        # Since the target is not set, no http post should occur, thus the
        # call_count should be zero.
        self.assertEqual(0, post.call_count)

    def test_http_dispatcher_with_no_metadata(self):
        self.CONF.dispatcher_http.target = 'fake'
        dispatcher = http.HttpDispatcher(self.CONF)

        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_metering_data(self.msg)

        self.assertEqual(1, post.call_count)


class TestEventDispatcherHttp(base.BaseTestCase):

    def setUp(self):
        super(TestEventDispatcherHttp, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf

    def test_http_dispatcher(self):
        self.CONF.dispatcher_http.event_target = 'fake'
        dispatcher = http.HttpDispatcher(self.CONF)

        event = event_models.Event(uuid.uuid4(), 'test',
                                   datetime.datetime(2012, 7, 2, 13, 53, 40),
                                   [], {})
        event = utils.message_from_event(
            event, self.CONF.publisher.telemetry_secret)

        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_events(event)

        self.assertEqual(1, post.call_count)

    def test_http_dispatcher_bad(self):
        self.CONF.dispatcher_http.event_target = ''
        dispatcher = http.HttpDispatcher(self.CONF)

        event = event_models.Event(uuid.uuid4(), 'test',
                                   datetime.datetime(2012, 7, 2, 13, 53, 40),
                                   [], {})
        event = utils.message_from_event(
            event, self.CONF.publisher.telemetry_secret)

        with mock.patch('ceilometer.dispatcher.http.LOG',
                        mock.MagicMock()) as LOG:
            dispatcher.record_events(event)
            self.assertTrue(LOG.exception.called)

    def test_http_dispatcher_share_target(self):
        self.CONF.dispatcher_http.target = 'fake'
        dispatcher = http.HttpDispatcher(self.CONF)

        event = event_models.Event(uuid.uuid4(), 'test',
                                   datetime.datetime(2012, 7, 2, 13, 53, 40),
                                   [], {})
        event = utils.message_from_event(
            event, self.CONF.publisher.telemetry_secret)

        with mock.patch.object(requests, 'post') as post:
            dispatcher.record_events(event)

        self.assertEqual('fake', post.call_args[0][0])
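# A minimal sketch of the behaviour the tests above exercise: the HTTP
# dispatcher reduces to a JSON POST against the configured target, and does
# nothing when no target is set. Illustrative only -- the real
# implementation is ceilometer/dispatcher/http.py, and this helper name is
# hypothetical.

import json

def _post_sample(target, sample, timeout):
    if not target:
        # Mirrors the zero-call_count assertion in
        # test_http_dispatcher_with_no_target: no target, no POST.
        return None
    return requests.post(target,
                         data=json.dumps(sample),
                         headers={'Content-Type': 'application/json'},
                         timeout=timeout)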
ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/__init__.py0000664000567000056710000000000012701406223025501 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/test_file.py0000664000567000056710000000710012701406223025730 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging.handlers import os import tempfile from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.dispatcher import file from ceilometer.publisher import utils class TestDispatcherFile(base.BaseTestCase): def setUp(self): super(TestDispatcherFile, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_file_dispatcher_with_all_config(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = 50 self.CONF.dispatcher_file.backup_count = 5 dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be RotatingFileHandler handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.record_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_path_only(self): # Create a temporaryFile to get a file name tf = tempfile.NamedTemporaryFile('r') filename = tf.name tf.close() self.CONF.dispatcher_file.file_path = filename self.CONF.dispatcher_file.max_bytes = 0 self.CONF.dispatcher_file.backup_count = 0 dispatcher = file.FileDispatcher(self.CONF) # The number of the handlers should be 1 self.assertEqual(1, len(dispatcher.log.handlers)) # The handler should be RotatingFileHandler handler = dispatcher.log.handlers[0] self.assertIsInstance(handler, logging.FileHandler) msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) # The record_metering_data method should exist and not produce errors. dispatcher.record_metering_data(msg) # After the method call above, the file should have been created. self.assertTrue(os.path.exists(handler.baseFilename)) def test_file_dispatcher_with_no_path(self): self.CONF.dispatcher_file.file_path = None dispatcher = file.FileDispatcher(self.CONF) # The log should be None self.assertIsNone(dispatcher.log) ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/test_dispatcher.py0000664000567000056710000000342412701406223027144 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture from oslotest import mockpatch from ceilometer import dispatcher from ceilometer.tests import base class FakeDispatcherSample(dispatcher.MeterDispatcherBase): def record_metering_data(self, data): pass class FakeDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): def record_metering_data(self, data): pass def record_events(self, events): pass class TestDispatchManager(base.BaseTestCase): def setUp(self): super(TestDispatchManager, self).setUp() self.conf = self.useFixture(fixture.Config()) self.conf.config(meter_dispatchers=['database', 'gnocchi'], event_dispatchers=['database']) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.gnocchi.GnocchiDispatcher', new=FakeDispatcherSample)) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.database.DatabaseDispatcher', new=FakeDispatcher)) def test_load(self): sample_mg, event_mg = dispatcher.load_dispatcher_manager() self.assertEqual(2, len(list(sample_mg))) self.assertEqual(1, len(list(event_mg))) ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/test_db.py0000664000567000056710000001261712701406223025407 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
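# A simplified sketch of the HMAC signing scheme these tests rely on via
# utils.compute_signature / utils.verify_signature. Illustrative only: the
# real implementation in ceilometer.publisher.utils also walks nested
# key/value pairs, while this flat version (hypothetical helper names)
# just shows the idea.

import hashlib
import hmac

def _sign(message, secret):
    digest = hmac.new(secret.encode('utf-8'), digestmod=hashlib.sha256)
    for name in sorted(message):
        if name == 'message_signature':
            continue  # never sign the signature field itself
        digest.update(('%s=%s' % (name, message[name])).encode('utf-8'))
    return digest.hexdigest()

def _verify(message, secret):
    # Constant-time comparison against the embedded signature; a mismatch
    # is what makes the dispatcher drop the sample in test_invalid_message.
    return hmac.compare_digest(
        _sign(message, secret), str(message.get('message_signature', '')))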
import datetime import uuid import mock from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.dispatcher import database from ceilometer.event.storage import models as event_models from ceilometer.publisher import utils class TestDispatcherDB(base.BaseTestCase): def setUp(self): super(TestDispatcherDB, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('connection', 'sqlite://', group='database') self.dispatcher = database.DatabaseDispatcher(self.CONF) self.ctx = None def test_event_conn(self): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}) event = utils.message_from_event(event, self.CONF.publisher.telemetry_secret) with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[0][0][0])) @mock.patch('ceilometer.publisher.utils.verify_signature') def test_event_with_bad_signature(self, mocked_verify): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), [], {}).serialize() def _fake_verify(ev, secret): if ev.get('message_signature') == 'bad_signature': return False return True mocked_verify.side_effect = _fake_verify with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: event['message_signature'] = 'bad_signature' self.dispatcher.record_events(event) self.assertEqual([], record_events.call_args_list[0][0][0]) del event['message_signature'] event['message_signature'] = utils.compute_signature( event, self.CONF.publisher.telemetry_secret) self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[1][0][0])) def test_valid_message(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) with mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(msg) def test_invalid_message(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'message_signature': 'invalid-signature'} class ErrorConnection(object): called = False def record_metering_data(self, data): self.called = True self.dispatcher._meter_conn = ErrorConnection() self.dispatcher.record_metering_data(msg) if self.dispatcher.meter_conn.called: self.fail('Should not have called the storage connection') def test_timestamp_conversion(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'timestamp': '2012-07-02T13:53:40Z', } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) expected = msg.copy() expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) with mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(expected) def test_timestamp_tzinfo_conversion(self): msg = {'counter_name': 'test', 'resource_id': self.id(), 'counter_volume': 1, 'timestamp': '2012-09-30T15:31:50.262-08:00', } msg['message_signature'] = utils.compute_signature( msg, self.CONF.publisher.telemetry_secret, ) expected = msg.copy() expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, 31, 50, 262000) with 
mock.patch.object(self.dispatcher.meter_conn, 'record_metering_data') as record_metering_data: self.dispatcher.record_metering_data(msg) record_metering_data.assert_called_once_with(expected) ceilometer-6.0.0/ceilometer/tests/unit/dispatcher/test_gnocchi.py0000664000567000056710000004062112701406223026430 0ustar jenkinsjenkins00000000000000# # Copyright 2014 eNovance # # Authors: Mehdi Abaakouk # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid from gnocchiclient import exceptions as gnocchi_exc import mock from oslo_config import fixture as config_fixture from oslo_utils import fileutils from oslotest import mockpatch import requests import six import testscenarios from ceilometer.dispatcher import gnocchi from ceilometer import service as ceilometer_service from ceilometer.tests import base load_tests = testscenarios.load_tests_apply_scenarios @mock.patch('gnocchiclient.v1.client.Client', mock.Mock()) class DispatcherTest(base.BaseTestCase): def setUp(self): super(DispatcherTest, self).setUp() self.conf = self.useFixture(config_fixture.Config()) ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf.config( resources_definition_file=self.path_get( 'etc/ceilometer/gnocchi_resources.yaml'), group="dispatcher_gnocchi" ) self.resource_id = str(uuid.uuid4()) self.samples = [{ 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_id': self.resource_id, 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }}, { 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2014-05-08 20:23:48.028195', 'resource_id': self.resource_id, 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } }] ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.conf.conf.dispatcher_gnocchi.filter_service_activity = True def test_config_load(self): self.conf.config(filter_service_activity=False, group='dispatcher_gnocchi') d = gnocchi.GnocchiDispatcher(self.conf.conf) names = [rd.cfg['resource_type'] for rd in d.resources_definition] self.assertIn('instance', names) self.assertIn('volume', names) @mock.patch('ceilometer.dispatcher.gnocchi.LOG') def test_broken_config_load(self, mylog): contents = [("---\n" "resources:\n" " - resource_type: foobar\n"), ("---\n" "resources:\n" " - resource_type: 0\n"), ("---\n" "resources:\n" " - sample_types: ['foo', 'bar']\n"), ("---\n" "resources:\n" " - sample_types: foobar\n" " - resource_type: foobar\n"), ] for content in contents: if six.PY3: content = 
content.encode('utf-8') temp = fileutils.write_to_tempfile(content=content, prefix='gnocchi_resources', suffix='.yaml') self.addCleanup(os.remove, temp) self.conf.config(filter_service_activity=False, resources_definition_file=temp, group='dispatcher_gnocchi') d = gnocchi.GnocchiDispatcher(self.conf.conf) self.assertTrue(mylog.error.called) self.assertEqual(0, len(d.resources_definition)) @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' '._process_resource') def _do_test_activity_filter(self, expected_samples, fake_process_resource): def assert_samples(resource_id, metric_grouped_samples): samples = [] for metric_name, s in metric_grouped_samples: samples.extend(list(s)) self.assertEqual(expected_samples, samples) fake_process_resource.side_effect = assert_samples d = gnocchi.GnocchiDispatcher(self.conf.conf) d.record_metering_data(self.samples) fake_process_resource.assert_called_with(self.resource_id, mock.ANY) def test_activity_filter_match_project_id(self): self.samples[0]['project_id'] = ( 'a2d42c23-d518-46b6-96ab-3fba2e146859') self._do_test_activity_filter([self.samples[1]]) def test_activity_filter_match_swift_event(self): self.samples[0]['counter_name'] = 'storage.api.request' self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' self._do_test_activity_filter([self.samples[1]]) def test_activity_filter_nomatch(self): self._do_test_activity_filter(self.samples) class MockResponse(mock.NonCallableMock): def __init__(self, code): text = {500: 'Internal Server Error', 404: 'Not Found', 204: 'Created', 409: 'Conflict', }.get(code) super(MockResponse, self).__init__(spec=requests.Response, status_code=code, text=text) class DispatcherWorkflowTest(base.BaseTestCase, testscenarios.TestWithScenarios): sample_scenarios = [ ('disk.root.size', dict( sample={ 'counter_name': 'disk.root.size', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_metadata': { 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } }, measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': '2' }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ 'host': 'foo', 'image_ref': 'imageref!', 'flavor_id': 1234, 'display_name': 'myinstance', }, metric_names=[ 'instance', 'disk.root.size', 'disk.ephemeral.size', 'memory', 'vcpus', 'memory.usage', 'memory.resident', 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', 'disk.read.requests.rate', 'disk.write.requests', 'disk.write.requests.rate', 'disk.read.bytes', 'disk.read.bytes.rate', 'disk.write.bytes', 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', 'disk.capacity', 'disk.allocation', 'disk.usage'], resource_type='instance')), ('hardware.ipmi.node.power', dict( sample={ 'counter_name': 'hardware.ipmi.node.power', 'counter_type': 'gauge', 'counter_volume': '2', 'user_id': 'test_user', 'project_id': 'test_project', 'source': 'openstack', 'timestamp': '2012-05-08 20:23:48.028195', 'resource_metadata': { 'useless': 'not_used', } }, measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': '2' }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ }, metric_names=[ 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', 'hardware.ipmi.node.inlet_temperature', 'hardware.ipmi.node.outlet_temperature', 
'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' ], resource_type='ipmi')), ] worflow_scenarios = [ ('normal_workflow', dict(measure=204, post_resource=None, metric=None, measure_retry=None, patch_resource=204)), ('new_resource', dict(measure=404, post_resource=204, metric=None, measure_retry=204, patch_resource=204)), ('new_resource_fail', dict(measure=404, post_resource=500, metric=None, measure_retry=None, patch_resource=None)), ('resource_update_fail', dict(measure=204, post_resource=None, metric=None, measure_retry=None, patch_resource=500)), ('new_metric', dict(measure=404, post_resource=None, metric=204, measure_retry=204, patch_resource=204)), ('new_metric_fail', dict(measure=404, post_resource=None, metric=500, measure_retry=None, patch_resource=None)), ('retry_fail', dict(measure=404, post_resource=409, metric=None, measure_retry=500, patch_resource=None)), ('measure_fail', dict(measure=500, post_resource=None, metric=None, measure_retry=None, patch_resource=None)), ] @classmethod def generate_scenarios(cls): cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, cls.worflow_scenarios) def setUp(self): super(DispatcherWorkflowTest, self).setUp() self.conf = self.useFixture(config_fixture.Config()) # Set this explicitly to avoid conflicts with any existing # configuration. self.conf.config(url='http://localhost:8041', group='dispatcher_gnocchi') ks_client = mock.Mock() ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.ks_client = ks_client ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf.config( resources_definition_file=self.path_get( 'etc/ceilometer/gnocchi_resources.yaml'), group="dispatcher_gnocchi" ) self.sample['resource_id'] = str(uuid.uuid4()) + "/foobar" @mock.patch('ceilometer.dispatcher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test_workflow(self, fakeclient_cls, logger): self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) fakeclient = fakeclient_cls.return_value # FIXME(sileht): we don't use urlparse.quote here # to ensure / is converted in %2F # temporary disabled until we find a solution # on gnocchi side. 
Current gnocchiclient doesn't # encode the resource_id resource_id = self.sample['resource_id'] # .replace("/", "%2F"), metric_name = self.sample['counter_name'] expected_calls = [ mock.call.capabilities.list(), mock.call.metric.add_measures(metric_name, self.measures_attributes, resource_id)] add_measures_side_effect = [] if self.measure == 404 and self.post_resource: add_measures_side_effect += [ gnocchi_exc.ResourceNotFound(404)] elif self.measure == 404 and self.metric: add_measures_side_effect += [ gnocchi_exc.MetricNotFound(404)] elif self.measure == 500: add_measures_side_effect += [Exception('boom!')] if self.post_resource: attributes = self.postable_attributes.copy() attributes.update(self.patchable_attributes) attributes['id'] = self.sample['resource_id'] attributes['metrics'] = dict((metric_name, {}) for metric_name in self.metric_names) expected_calls.append(mock.call.resource.create( self.resource_type, attributes)) if self.post_resource == 409: fakeclient.resource.create.side_effect = [ gnocchi_exc.ResourceAlreadyExists(409)] elif self.post_resource == 500: fakeclient.resource.create.side_effect = [Exception('boom!')] if self.metric: expected_calls.append(mock.call.metric.create({ 'name': self.sample['counter_name'], 'resource_id': resource_id})) if self.metric == 409: fakeclient.metric.create.side_effect = [ gnocchi_exc.NamedMetricAreadyExists(409)] elif self.metric == 500: fakeclient.metric.create.side_effect = [Exception('boom!')] if self.measure_retry: expected_calls.append(mock.call.metric.add_measures( metric_name, self.measures_attributes, resource_id)) if self.measure_retry == 204: add_measures_side_effect += [None] elif self.measure_retry == 500: add_measures_side_effect += [ Exception('boom!')] else: add_measures_side_effect += [None] if self.patch_resource and self.patchable_attributes: expected_calls.append(mock.call.resource.update( self.resource_type, resource_id, self.patchable_attributes)) if self.patch_resource == 500: fakeclient.resource.update.side_effect = [Exception('boom!')] fakeclient.metric.add_measures.side_effect = add_measures_side_effect self.dispatcher.record_metering_data([self.sample]) # Check that the last log message is the expected one if (self.measure == 500 or self.measure_retry == 500 or self.metric == 500 or self.post_resource == 500 or (self.patch_resource == 500 and self.patchable_attributes)): logger.error.assert_called_with('boom!', exc_info=True) elif self.patch_resource == 204 and self.patchable_attributes: logger.debug.assert_called_with( 'Resource %s updated', self.sample['resource_id']) self.assertEqual(0, logger.error.call_count) elif self.measure == 200: logger.debug.assert_called_with( "Measure posted on metric %s of resource %s", self.sample['counter_name'], self.sample['resource_id']) self.assertEqual(0, logger.error.call_count) self.assertEqual(expected_calls, fakeclient.mock_calls) DispatcherWorkflowTest.generate_scenarios() ceilometer-6.0.0/ceilometer/tests/unit/event/0000775000567000056710000000000012701406364022403 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/event/test_endpoint.py0000664000567000056710000001712212701406223025631 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Ceilometer notify daemon.""" import mock from oslo_config import cfg from oslo_config import fixture as fixture_config import oslo_messaging from oslo_utils import fileutils from oslotest import mockpatch import six import yaml from ceilometer.event import endpoint as event_endpoint from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } cfg.CONF.import_opt('store_events', 'ceilometer.notification', group='notification') class TestEventEndpoint(tests_base.BaseTestCase): def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test.TestPublisher, 'except://': test.TestPublisher} return fake_drivers[url](url) def _setup_pipeline(self, publishers): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': ['test.test'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': publishers }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") self.CONF.set_override('event_pipeline_cfg_file', ev_pipeline_cfg_file) ev_pipeline_mgr = pipeline.setup_event_pipeline() return ev_pipeline_mgr def _setup_endpoint(self, publishers): ev_pipeline_mgr = self._setup_pipeline(publishers) self.endpoint = event_endpoint.EventsNotificationEndpoint( ev_pipeline_mgr) self.endpoint.event_converter = mock.MagicMock() self.endpoint.event_converter.to_event.return_value = mock.MagicMock( event_type='test.test') def setUp(self): super(TestEventEndpoint, self).setUp() 
self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([]) self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override("store_events", True, group="notification") self.setup_messaging(self.CONF) self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher', side_effect=self.get_publisher)) self.fake_publisher = mock.Mock() self.useFixture(mockpatch.Patch( 'ceilometer.publisher.test.TestPublisher', return_value=self.fake_publisher)) def test_message_to_event(self): self._setup_endpoint(['test://']) self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) def test_bad_event_non_ack_and_requeue(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) def test_message_to_event_bad_event(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.LOG") as mock_logger: ret = self.endpoint.process_notification('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) exception_mock = mock_logger.exception self.assertIn('Exit after error from publisher', exception_mock.call_args_list[0][0][0]) def test_message_to_event_bad_event_multi_publish(self): self._setup_endpoint(['test://', 'except://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.LOG") as mock_logger: ret = self.endpoint.process_notification('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) exception_mock = mock_logger.exception self.assertIn('Continue after error from publisher', exception_mock.call_args_list[0][0][0]) ceilometer-6.0.0/ceilometer/tests/unit/event/__init__.py0000664000567000056710000000000012701406223024474 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/event/test_converter.py0000664000567000056710000010015312701406223026015 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
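# A minimal sketch of the JSONPath lookup that the TraitDefinition tests
# below exercise. Illustrative only: converter.TraitDefinition additionally
# handles type coercion and trait plugins, and this helper name is
# hypothetical.

import jsonpath_rw_ext

def _first_match(fields_expr, notification):
    # A multi-field spec compiles to '(payload.a)|(payload.b)', so the
    # first non-null match wins -- which is why tests such as
    # test_to_trait_multiple depend on the order of the fields list.
    for match in jsonpath_rw_ext.parse(fields_expr).find(notification):
        if match.value is not None:
            return match.value
    return None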
import datetime import jsonpath_rw_ext import mock from oslo_config import fixture as fixture_config import six from ceilometer import declarative from ceilometer.event import converter from ceilometer.event.storage import models from ceilometer import service as ceilometer_service from ceilometer.tests import base class ConverterBase(base.BaseTestCase): @staticmethod def _create_test_notification(event_type, message_id, **kw): return dict(event_type=event_type, message_id=message_id, priority="INFO", publisher_id="compute.host-1-2-3", timestamp="2013-08-08 21:06:37.803826", payload=kw, ) def assertIsValidEvent(self, event, notification): self.assertIsNot( None, event, "Notification dropped unexpectedly:" " %s" % str(notification)) self.assertIsInstance(event, models.Event) def assertIsNotValidEvent(self, event, notification): self.assertIs( None, event, "Notification NOT dropped when expected to be dropped:" " %s" % str(notification)) def assertHasTrait(self, event, name, value=None, dtype=None): traits = [trait for trait in event.traits if trait.name == name] self.assertTrue( len(traits) > 0, "Trait %s not found in event %s" % (name, event)) trait = traits[0] if value is not None: self.assertEqual(value, trait.value) if dtype is not None: self.assertEqual(dtype, trait.dtype) if dtype == models.Trait.INT_TYPE: self.assertIsInstance(trait.value, int) elif dtype == models.Trait.FLOAT_TYPE: self.assertIsInstance(trait.value, float) elif dtype == models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) elif dtype == models.Trait.TEXT_TYPE: self.assertIsInstance(trait.value, six.string_types) def assertDoesNotHaveTrait(self, event, name): traits = [trait for trait in event.traits if trait.name == name] self.assertEqual( len(traits), 0, "Extra Trait %s found in event %s" % (name, event)) def assertHasDefaultTraits(self, event): text = models.Trait.TEXT_TYPE self.assertHasTrait(event, 'service', dtype=text) def _cmp_tree(self, this, other): if hasattr(this, 'right') and hasattr(other, 'right'): return (self._cmp_tree(this.right, other.right) and self._cmp_tree(this.left, other.left)) if not hasattr(this, 'right') and not hasattr(other, 'right'): return this == other return False def assertPathsEqual(self, path1, path2): self.assertTrue(self._cmp_tree(path1, path2), 'JSONPaths not equivalent %s %s' % (path1, path2)) class TestTraitDefinition(ConverterBase): def setUp(self): super(TestTraitDefinition, self).setUp() self.n1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_uuid="uuid-for-instance-0001", instance_id="id-for-instance-0001", instance_uuid2=None, instance_id2=None, host='host-1-2-3', bogus_date='', image_meta=dict( disk_gb='20', thing='whatzit'), foobar=50) self.ext1 = mock.MagicMock(name='mock_test_plugin') self.test_plugin_class = self.ext1.plugin self.test_plugin = self.test_plugin_class() self.test_plugin.trait_values.return_value = ['foobar'] self.ext1.reset_mock() self.ext2 = mock.MagicMock(name='mock_nothing_plugin') self.nothing_plugin_class = self.ext2.plugin self.nothing_plugin = self.nothing_plugin_class() self.nothing_plugin.trait_values.return_value = [None] self.ext2.reset_mock() self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) def test_to_trait_with_plugin(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, 
models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_null_match_with_plugin(self): cfg = dict(type='text', fields=['payload.nothere', 'payload.bogus'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([]) def test_to_trait_with_plugin_null(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='nothing')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIs(None, t) self.nothing_plugin_class.assert_called_once_with() self.nothing_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_with_plugin_with_parameters(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test', parameters=dict(a=1, b='foo'))) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with(a=1, b='foo') self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait(self): cfg = dict(type='text', fields='payload.instance_id') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='int', fields='payload.image_meta.disk_gb') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.INT_TYPE, t.dtype) self.assertEqual(20, t.value) def test_to_trait_multiple(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='text', fields=['payload.instance_uuid', 'payload.instance_id']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_multiple_different_nesting(self): cfg = dict(type='int', fields=['payload.foobar', 'payload.image_meta.disk_gb']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) 
        self.assertEqual(50, t.value)

        cfg = dict(type='int',
                   fields=['payload.image_meta.disk_gb',
                           'payload.foobar'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual(20, t.value)

    def test_to_trait_some_null_multiple(self):
        cfg = dict(type='text',
                   fields=['payload.instance_id2',
                           'payload.instance_uuid'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual('uuid-for-instance-0001', t.value)

    def test_to_trait_some_missing_multiple(self):
        cfg = dict(type='text',
                   fields=['payload.not_here_boss',
                           'payload.instance_uuid'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual('uuid-for-instance-0001', t.value)

    def test_to_trait_missing(self):
        cfg = dict(type='text', fields='payload.not_here_boss')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIs(None, t)

    def test_to_trait_null(self):
        cfg = dict(type='text', fields='payload.instance_id2')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIs(None, t)

    def test_to_trait_empty_nontext(self):
        cfg = dict(type='datetime', fields='payload.bogus_date')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIs(None, t)

    def test_to_trait_multiple_null_missing(self):
        cfg = dict(type='text',
                   fields=['payload.not_here_boss',
                           'payload.instance_id2'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIs(None, t)

    def test_missing_fields_config(self):
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'bogus_trait',
                          dict(),
                          self.fake_plugin_mgr)

    def test_string_fields_config(self):
        cfg = dict(fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertPathsEqual(t.getter.__self__,
                              jsonpath_rw_ext.parse('payload.test'))

    def test_list_fields_config(self):
        cfg = dict(fields=['payload.test', 'payload.other'])
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertPathsEqual(
            t.getter.__self__,
            jsonpath_rw_ext.parse('(payload.test)|(payload.other)'))

    def test_invalid_path_config(self):
        # test invalid jsonpath...
        cfg = dict(fields='payload.bogus(')
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'bogus_trait',
                          cfg,
                          self.fake_plugin_mgr)

    def test_invalid_plugin_config(self):
        # test a plugin specification with no 'name' key
        cfg = dict(fields='payload.test', plugin=dict(bogus="true"))
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'test_trait',
                          cfg,
                          self.fake_plugin_mgr)

    def test_unknown_plugin(self):
        # test a plugin name that is not registered
        cfg = dict(fields='payload.test', plugin=dict(name='bogus'))
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'test_trait',
                          cfg,
                          self.fake_plugin_mgr)

    def test_type_config(self):
        cfg = dict(type='text', fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type)

        cfg = dict(type='int', fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertEqual(models.Trait.INT_TYPE, t.trait_type)

        cfg = dict(type='float', fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type)

        cfg = dict(type='datetime', fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type)

    def test_invalid_type_config(self):
        # test an unknown trait type
        cfg = dict(type='bogus', fields='payload.test')
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'bogus_trait',
                          cfg,
                          self.fake_plugin_mgr)


class TestEventDefinition(ConverterBase):

    def setUp(self):
        super(TestEventDefinition, self).setUp()

        self.traits_cfg = {
            'instance_id': {
                'type': 'text',
                'fields': ['payload.instance_uuid',
                           'payload.instance_id'],
            },
            'host': {
                'type': 'text',
                'fields': 'payload.host',
            },
        }

        self.test_notification1 = self._create_test_notification(
            "test.thing",
            "uuid-for-notif-0001",
            instance_id="uuid-for-instance-0001",
            host='host-1-2-3')

        self.test_notification2 = self._create_test_notification(
            "test.thing",
            "uuid-for-notif-0002",
            instance_id="uuid-for-instance-0002")

        self.test_notification3 = self._create_test_notification(
            "test.thing",
            "uuid-for-notif-0003",
            instance_id="uuid-for-instance-0003",
            host=None)
        self.fake_plugin_mgr = {}

    def test_to_event(self):
        dtype = models.Trait.TEXT_TYPE
        cfg = dict(event_type='test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)

        e = edef.to_event(self.test_notification1)
        self.assertEqual('test.thing', e.event_type)
        self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826),
                         e.generated)

        self.assertHasDefaultTraits(e)
        self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype)
        self.assertHasTrait(e, 'instance_id',
                            value='uuid-for-instance-0001',
                            dtype=dtype)

    def test_to_event_missing_trait(self):
        dtype = models.Trait.TEXT_TYPE
        cfg = dict(event_type='test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)

        e = edef.to_event(self.test_notification2)

        self.assertHasDefaultTraits(e)
        self.assertHasTrait(e, 'instance_id',
                            value='uuid-for-instance-0002',
                            dtype=dtype)
        self.assertDoesNotHaveTrait(e, 'host')

    def test_to_event_null_trait(self):
        dtype = models.Trait.TEXT_TYPE
        cfg = dict(event_type='test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)

        e = edef.to_event(self.test_notification3)

        self.assertHasDefaultTraits(e)
        self.assertHasTrait(e, 'instance_id',
                            value='uuid-for-instance-0003',
                            dtype=dtype)
        self.assertDoesNotHaveTrait(e, 'host')

    def test_bogus_cfg_no_traits(self):
        bogus = dict(event_type='test.foo')
        self.assertRaises(declarative.DefinitionException,
                          converter.EventDefinition,
                          bogus,
                          self.fake_plugin_mgr)

    def test_bogus_cfg_no_type(self):
        bogus = dict(traits=self.traits_cfg)
        self.assertRaises(declarative.DefinitionException,
                          converter.EventDefinition,
                          bogus,
                          self.fake_plugin_mgr)

    def test_included_type_string(self):
        cfg = dict(event_type='test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertEqual(1, len(edef._included_types))
        self.assertEqual('test.thing', edef._included_types[0])
        self.assertEqual(0, len(edef._excluded_types))
        self.assertTrue(edef.included_type('test.thing'))
        self.assertFalse(edef.excluded_type('test.thing'))
        self.assertTrue(edef.match_type('test.thing'))
        self.assertFalse(edef.match_type('random.thing'))

    def test_included_type_list(self):
        cfg = dict(event_type=['test.thing', 'other.thing'],
                   traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertEqual(2, len(edef._included_types))
        self.assertEqual(0, len(edef._excluded_types))
        self.assertTrue(edef.included_type('test.thing'))
        self.assertTrue(edef.included_type('other.thing'))
        self.assertFalse(edef.excluded_type('test.thing'))
        self.assertTrue(edef.match_type('test.thing'))
        self.assertTrue(edef.match_type('other.thing'))
        self.assertFalse(edef.match_type('random.thing'))

    def test_excluded_type_string(self):
        cfg = dict(event_type='!test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertEqual(1, len(edef._included_types))
        self.assertEqual('*', edef._included_types[0])
        self.assertEqual(1, len(edef._excluded_types))
        self.assertEqual('test.thing', edef._excluded_types[0])
        self.assertTrue(edef.excluded_type('test.thing'))
        self.assertTrue(edef.included_type('random.thing'))
        self.assertFalse(edef.match_type('test.thing'))
        self.assertTrue(edef.match_type('random.thing'))

    def test_excluded_type_list(self):
        cfg = dict(event_type=['!test.thing', '!other.thing'],
                   traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertEqual(1, len(edef._included_types))
        self.assertEqual(2, len(edef._excluded_types))
        self.assertTrue(edef.excluded_type('test.thing'))
        self.assertTrue(edef.excluded_type('other.thing'))
        self.assertFalse(edef.excluded_type('random.thing'))
        self.assertFalse(edef.match_type('test.thing'))
        self.assertFalse(edef.match_type('other.thing'))
        self.assertTrue(edef.match_type('random.thing'))

    def test_mixed_type_list(self):
        cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'],
                   traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertEqual(1, len(edef._included_types))
        self.assertEqual(2, len(edef._excluded_types))
        self.assertTrue(edef.excluded_type('test.thing'))
        self.assertTrue(edef.excluded_type('other.thing'))
        self.assertFalse(edef.excluded_type('random.thing'))
        self.assertFalse(edef.match_type('test.thing'))
        self.assertFalse(edef.match_type('other.thing'))
        self.assertFalse(edef.match_type('random.whatzit'))
        self.assertTrue(edef.match_type('random.thing'))

    def test_catchall(self):
        cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'],
                   traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertFalse(edef.is_catchall)

        cfg = dict(event_type=['!other.thing'], traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertFalse(edef.is_catchall)

        cfg = dict(event_type=['other.thing'], traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertFalse(edef.is_catchall)

        cfg = dict(event_type=['*', '!other.thing'], traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertFalse(edef.is_catchall)

        cfg = dict(event_type=['*'], traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertTrue(edef.is_catchall)

        cfg = dict(event_type=['*', 'foo'], traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        self.assertTrue(edef.is_catchall)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_extract_when(self, mock_utcnow):
        now = datetime.datetime.utcnow()
        modified = now + datetime.timedelta(minutes=1)
        mock_utcnow.return_value = now

        body = {"timestamp": str(modified)}
        when = converter.EventDefinition._extract_when(body)
        self.assertTimestampEqual(modified, when)

        body = {"_context_timestamp": str(modified)}
        when = converter.EventDefinition._extract_when(body)
        self.assertTimestampEqual(modified, when)

        then = now + datetime.timedelta(hours=1)
        body = {"timestamp": str(modified), "_context_timestamp": str(then)}
        when = converter.EventDefinition._extract_when(body)
        self.assertTimestampEqual(modified, when)

        when = converter.EventDefinition._extract_when({})
        self.assertTimestampEqual(now, when)

    def test_default_traits(self):
        cfg = dict(event_type='test.thing', traits={})
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys()
        traits = set(edef.traits.keys())
        for dt in default_traits:
            self.assertIn(dt, traits)
        self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS),
                         len(edef.traits))

    def test_traits(self):
        cfg = dict(event_type='test.thing', traits=self.traits_cfg)
        edef = converter.EventDefinition(cfg, self.fake_plugin_mgr)
        default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys()
        traits = set(edef.traits.keys())
        for dt in default_traits:
            self.assertIn(dt, traits)
        self.assertIn('host', traits)
        self.assertIn('instance_id', traits)
        self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2,
                         len(edef.traits))


class TestNotificationConverter(ConverterBase):

    def setUp(self):
        super(TestNotificationConverter, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        ceilometer_service.prepare_service(argv=[], config_files=[])

        self.valid_event_def1 = [{
            'event_type': 'compute.instance.create.*',
            'traits': {
                'instance_id': {
                    'type': 'text',
                    'fields': ['payload.instance_uuid',
                               'payload.instance_id'],
                },
                'host': {
                    'type': 'text',
                    'fields': 'payload.host',
                },
            },
        }]

        self.test_notification1 = self._create_test_notification(
            "compute.instance.create.start",
            "uuid-for-notif-0001",
            instance_id="uuid-for-instance-0001",
            host='host-1-2-3')
        self.test_notification2 = self._create_test_notification(
            "bogus.notification.from.mars",
            "uuid-for-notif-0002",
            weird='true',
            host='cydonia')
        self.fake_plugin_mgr = {}

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_converter_missing_keys(self, mock_utcnow):
        # test a malformed notification
        now = datetime.datetime.utcnow()
        mock_utcnow.return_value = now
        c = converter.NotificationEventsConverter(
            [],
            self.fake_plugin_mgr,
            add_catchall=True)
        message = {'event_type': "foo",
                   'message_id': "abc",
                   'publisher_id': "1"}
        e = c.to_event(message)
        self.assertIsValidEvent(e, message)
        self.assertEqual(1, len(e.traits))
        self.assertEqual("foo", e.event_type)
        self.assertEqual(now, e.generated)

    def test_converter_with_catchall(self):
        c = converter.NotificationEventsConverter(
            self.valid_event_def1,
            self.fake_plugin_mgr,
            add_catchall=True)
        self.assertEqual(2, len(c.definitions))

        e = c.to_event(self.test_notification1)
        self.assertIsValidEvent(e, self.test_notification1)
        self.assertEqual(3, len(e.traits))
        self.assertHasDefaultTraits(e)
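        # test_notification1 matches the explicit compute.instance.create.*
        # definition, so both declared traits are expected on top of the
        # default ones.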
self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event(self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) self.assertDoesNotHaveTrait(e, 'instance_id') self.assertDoesNotHaveTrait(e, 'host') def test_converter_without_catchall(self): c = converter.NotificationEventsConverter( self.valid_event_def1, self.fake_plugin_mgr, add_catchall=False) self.assertEqual(1, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event(self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) def test_converter_empty_cfg_with_catchall(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr, add_catchall=True) self.assertEqual(1, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) e = c.to_event(self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) def test_converter_empty_cfg_without_catchall(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr, add_catchall=False) self.assertEqual(0, len(c.definitions)) e = c.to_event(self.test_notification1) self.assertIsNotValidEvent(e, self.test_notification1) e = c.to_event(self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) @staticmethod def _convert_message(convert, level): message = {'priority': level, 'event_type': "foo", 'message_id': "abc", 'publisher_id': "1"} return convert.to_event(message) def test_store_raw_all(self): self.CONF.event.store_raw = ['info', 'error'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_info_only(self): self.CONF.event.store_raw = ['info'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_error_only(self): self.CONF.event.store_raw = ['error'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_skip_all(self): c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_info_only_no_case(self): self.CONF.event.store_raw = ['INFO'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_skip_all(self): self.CONF.event.store_raw = ['unknown'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_and_good(self): self.CONF.event.store_raw = ['info', 'unknown'] c = converter.NotificationEventsConverter( [], self.fake_plugin_mgr) 
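        # 'unknown' is not a valid priority level; the valid 'info' entry
        # should still take effect on its own.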
self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_setup_events_default_config(self): self.CONF.set_override('definitions_cfg_file', '/not/existing/file', group='event') self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.setup_events(self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) self.assertEqual(1, len(c.definitions)) self.assertTrue(c.definitions[0].is_catchall) self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.setup_events(self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) self.assertEqual(0, len(c.definitions)) ceilometer-6.0.0/ceilometer/tests/unit/event/test_trait_plugins.py0000664000567000056710000001035212701406223026673 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from ceilometer.event import trait_plugins class TestSplitterPlugin(base.BaseTestCase): def setUp(self): super(TestSplitterPlugin, self).setUp() self.pclass = trait_plugins.SplitterTraitPlugin def test_split(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test', value) param = dict(separator='-', segment=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar', value) param = dict(separator='-', segment=1, max_split=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar-baz', value) def test_no_sep(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test.foobar.baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test.foobar.baz', value) def test_no_segment(self): param = dict(separator='-', segment=5) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertIs(None, value) def test_no_match(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [] value = plugin.trait_values(match_list) self.assertEqual([], value) class TestBitfieldPlugin(base.BaseTestCase): def setUp(self): super(TestBitfieldPlugin, self).setUp() self.pclass = trait_plugins.BitfieldTraitPlugin self.init = 0 self.params = dict(initial_bitfield=self.init, flags=[dict(path='payload.foo', bit=0, value=42), dict(path='payload.foo', bit=1, value=12), dict(path='payload.thud', bit=1, value=23), dict(path='thingy.boink', bit=4), dict(path='thingy.quux', bit=6, value="wokka"), dict(path='payload.bar', bit=10, value='test')]) def test_bitfield(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = 
self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) def test_initial(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] self.params['initial_bitfield'] = 0x2000 plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x2412, value[0]) def test_no_match(self): match_list = [] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(self.init, value[0]) def test_multi(self): match_list = [('payload.foo', 12), ('payload.thud', 23), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) ceilometer-6.0.0/ceilometer/tests/unit/__init__.py0000664000567000056710000000000012701406223023353 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/test_neutronclient.py0000664000567000056710000001657412701406223025573 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from ceilometer import neutron_client class TestNeutronClient(base.BaseTestCase): def setUp(self): super(TestNeutronClient, self).setUp() self.nc = neutron_client.Client() self.nc.lb_version = 'v1' @staticmethod def fake_ports_list(): return {'ports': [{'admin_state_up': True, 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'device_owner': 'network:router_gateway', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, ]} def test_port_get_all(self): with mock.patch.object(self.nc.client, 'list_ports', side_effect=self.fake_ports_list): ports = self.nc.port_get_all() self.assertEqual(1, len(ports)) self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', ports[0]['id']) @staticmethod def fake_networks_list(): return {'networks': [{'admin_state_up': True, 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'name': 'public', 'provider:network_type': 'gre', 'provider:physical_network': None, 'provider:segmentation_id': 2, 'router:external': True, 'shared': False, 'status': 'ACTIVE', 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, ]} @staticmethod def fake_pool_list(): return {'pools': [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ]} def test_pool_list(self): with mock.patch.object(self.nc.client, 'list_pools', side_effect=self.fake_pool_list): pools = 
self.nc.pool_get_all() self.assertEqual(1, len(pools)) self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', pools[0]['id']) @staticmethod def fake_vip_list(): return {'vips': [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, ]} def test_vip_list(self): with mock.patch.object(self.nc.client, 'list_vips', side_effect=self.fake_vip_list): vips = self.nc.vip_get_all() self.assertEqual(1, len(vips)) self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', vips[0]['id']) @staticmethod def fake_member_list(): return {'members': [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, ]} def test_member_list(self): with mock.patch.object(self.nc.client, 'list_members', side_effect=self.fake_member_list): members = self.nc.member_get_all() self.assertEqual(1, len(members)) self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', members[0]['id']) @staticmethod def fake_monitors_list(): return {'health_monitors': [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }]} def test_monitor_list(self): with mock.patch.object(self.nc.client, 'list_health_monitors', side_effect=self.fake_monitors_list): monitors = self.nc.health_monitor_get_all() self.assertEqual(1, len(monitors)) self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', monitors[0]['id']) @staticmethod def fake_pool_stats(fake_pool): return {'stats': [{'active_connections': 1, 'total_connections': 2, 'bytes_in': 3, 'bytes_out': 4 }]} def test_pool_stats(self): with mock.patch.object(self.nc.client, 'retrieve_pool_stats', side_effect=self.fake_pool_stats): stats = self.nc.pool_stats('fake_pool')['stats'] self.assertEqual(1, len(stats)) self.assertEqual(1, stats[0]['active_connections']) self.assertEqual(2, stats[0]['total_connections']) self.assertEqual(3, stats[0]['bytes_in']) self.assertEqual(4, stats[0]['bytes_out']) ceilometer-6.0.0/ceilometer/tests/unit/test_decoupled_pipeline.py0000664000567000056710000002725312701406224026530 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
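# These tests exercise the decoupled pipeline format, where sources and
# sinks are declared separately and joined by sink name. As a rough sketch
# (values taken from _setup_pipeline_cfg below, not a recommended
# production configuration), the YAML equivalent would look like:
#
#     sources:
#         - name: test_source
#           interval: 5
#           counters: ['a']
#           sinks: ['test_sink']
#     sinks:
#         - name: test_sink
#           transformers:
#               - name: update
#                 parameters: {}
#           publishers: ['test://']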
import yaml from ceilometer import pipeline from ceilometer import sample from ceilometer.tests import pipeline_base class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): def _setup_pipeline_cfg(self): source = {'name': 'test_source', 'interval': 5, 'counters': ['a'], 'resources': [], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'transformers': [{'name': 'update', 'parameters': {}}], 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) def _break_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def test_source_no_sink(self): del self.pipeline_cfg['sinks'] self._exception_create_pipelinemanager() def test_source_no_meters_or_counters(self): del self.pipeline_cfg['sources'][0]['counters'] self._exception_create_pipelinemanager() def test_source_dangling_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['second_sink'] }) self._exception_create_pipelinemanager() def test_sink_no_source(self): del self.pipeline_cfg['sources'] self._exception_create_pipelinemanager() def test_source_with_multiple_sinks(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('counters', counter_cfg) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) 
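        # One pipeline is materialized per (source, sink) pair, so a single
        # source fanning out to two sinks yields the two pipelines checked
        # here.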
self.assertEqual('test_source:second_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher, sfx in [(test_publisher, '_update'), (new_publisher, '_new')]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) def test_multiple_sources_with_single_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'interval': 5, 'counters': ['b'], 'resources': [], 'sinks': ['test_sink'] }) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher(None) as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('second_source:test_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] another_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher in [test_publisher, another_publisher]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) transformed_samples = self.TransformerClass.samples self.assertEqual(2, len(transformed_samples)) self.assertEqual(['a', 'b'], [getattr(s, 'name') for s in transformed_samples]) def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, meters, units): with open('etc/ceilometer/pipeline.yaml') as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) for s in pipeline_cfg['sinks']: s['publishers'] = ['test://'] pipeline_manager = pipeline.PipelineManager(pipeline_cfg, self.transformer_manager) pipe = pipeline_manager.pipelines[index] self._do_test_rate_of_change_mapping(pipe, meters, units) def test_rate_of_change_boilerplate_disk_read_cfg(self): meters = ('disk.read.bytes', 'disk.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_disk_write_cfg(self): meters = ('disk.write.bytes', 'disk.write.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_network_incoming_cfg(self): meters = ('network.incoming.bytes', 'network.incoming.packets') units = ('B', 'packet') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, meters, units) def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): meters = ('disk.device.read.bytes', 'disk.device.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): meters = ('disk.device.write.bytes', 'disk.device.write.requests') units = ('B', 'request') 
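        # Pipeline index 3 corresponds to the disk pipeline in the bundled
        # etc/ceilometer/pipeline.yaml (an inference from the other disk
        # read/write tests in this class, which use the same index).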
        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3,
                                                                 meters,
                                                                 units)

    def test_rate_of_change_boilerplate_network_outgoing_cfg(self):
        meters = ('network.outgoing.bytes', 'network.outgoing.packets')
        units = ('B', 'packet')
        self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4,
                                                                 meters,
                                                                 units)

    def test_duplicated_sinks_names(self):
        self.pipeline_cfg['sinks'].append({
            'name': 'test_sink',
            'publishers': ['except'],
        })
        self.assertRaises(pipeline.PipelineException,
                          pipeline.PipelineManager,
                          self.pipeline_cfg,
                          self.transformer_manager)

    def test_duplicated_source_names(self):
        self.pipeline_cfg['sources'].append({
            'name': 'test_source',
            'interval': 5,
            'counters': ['a'],
            'resources': [],
            'sinks': ['test_sink']
        })
        self.assertRaises(pipeline.PipelineException,
                          pipeline.PipelineManager,
                          self.pipeline_cfg,
                          self.transformer_manager)
ceilometer-6.0.0/ceilometer/tests/unit/test_utils.py0000664000567000056710000001510012701406223024022 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/utils.py
"""
import datetime
import decimal

from oslotest import base

from ceilometer import utils


class TestUtils(base.BaseTestCase):

    def test_datetime_to_decimal(self):
        expected = 1356093296.12
        utc_datetime = datetime.datetime.utcfromtimestamp(expected)
        actual = utils.dt_to_decimal(utc_datetime)
        self.assertAlmostEqual(expected, float(actual), places=5)

    def test_decimal_to_datetime(self):
        expected = 1356093296.12
        dexpected = decimal.Decimal(str(expected))  # Python 2.6 wants str()
        expected_datetime = datetime.datetime.utcfromtimestamp(expected)
        actual_datetime = utils.decimal_to_dt(dexpected)
        # Python 3 has a rounding issue here, so compare as floats
        self.assertAlmostEqual(utils.dt_to_decimal(expected_datetime),
                               utils.dt_to_decimal(actual_datetime),
                               places=5)

    def test_recursive_keypairs(self):
        data = {'a': 'A', 'b': 'B',
                'nested': {'a': 'A', 'b': 'B'}}
        pairs = list(utils.recursive_keypairs(data))
        self.assertEqual([('a', 'A'), ('b', 'B'),
                          ('nested:a', 'A'), ('nested:b', 'B')],
                         pairs)

    def test_recursive_keypairs_with_separator(self):
        data = {'a': 'A',
                'b': 'B',
                'nested': {'a': 'A',
                           'b': 'B',
                           },
                }
        separator = '.'
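        # Same nested payload as the previous test, but flattened with a
        # '.' separator instead of the default ':'.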
pairs = list(utils.recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) def test_restore_nesting_unested(self): metadata = {'a': 'A', 'b': 'B'} unwound = utils.restore_nesting(metadata) self.assertIs(metadata, unwound) def test_restore_nesting(self): metadata = {'a': 'A', 'b': 'B', 'nested:a': 'A', 'nested:b': 'B', 'nested:twice:c': 'C', 'nested:twice:d': 'D', 'embedded:e': 'E'} unwound = utils.restore_nesting(metadata) expected = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'twice': {'c': 'C', 'd': 'D'}}, 'embedded': {'e': 'E'}} self.assertEqual(expected, unwound) self.assertIsNot(metadata, unwound) def test_restore_nesting_with_separator(self): metadata = {'a': 'A', 'b': 'B', 'nested.a': 'A', 'nested.b': 'B', 'nested.twice.c': 'C', 'nested.twice.d': 'D', 'embedded.e': 'E'} unwound = utils.restore_nesting(metadata, separator='.') expected = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'twice': {'c': 'C', 'd': 'D'}}, 'embedded': {'e': 'E'}} self.assertEqual(expected, unwound) self.assertIsNot(metadata, unwound) def test_decimal_to_dt_with_none_parameter(self): self.assertIsNone(utils.decimal_to_dt(None)) def test_dict_to_kv(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, 'nested2': [{'c': 'A'}, {'c': 'B'}] } pairs = list(utils.dict_to_keyval(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B'), ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) def test_hash_of_set(self): x = ['a', 'b'] y = ['a', 'b', 'a'] z = ['a', 'c'] self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y)) self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z)) self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z)) def test_hash_ring(self): num_nodes = 10 num_keys = 1000 nodes = [str(x) for x in range(num_nodes)] hr = utils.HashRing(nodes) buckets = [0] * num_nodes assignments = [-1] * num_keys for k in range(num_keys): n = int(hr.get_node(str(k))) self.assertTrue(0 <= n <= num_nodes) buckets[n] += 1 assignments[k] = n # at least something in each bucket self.assertTrue(all((c > 0 for c in buckets))) # approximately even distribution diff = max(buckets) - min(buckets) self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) # consistency num_nodes += 1 nodes.append(str(num_nodes + 1)) hr = utils.HashRing(nodes) for k in range(num_keys): n = int(hr.get_node(str(k))) assignments[k] -= n reassigned = len([c for c in assignments if c != 0]) self.assertTrue(reassigned < num_keys / num_nodes) ceilometer-6.0.0/ceilometer/tests/unit/network/0000775000567000056710000000000012701406364022753 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/__init__.py0000664000567000056710000000000012701406223025044 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/services/0000775000567000056710000000000012701406364024576 5ustar 
jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/services/__init__.py0000664000567000056710000000000012701406223026667 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/services/test_lbaas.py0000664000567000056710000005204212701406223027266 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas class _BaseTestLBPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() cfg.CONF.set_override('neutron_lbaas_version', 'v1', group='service_types') plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBPoolPollster(_BaseTestLBPollster): def setUp(self): super(TestLBPoolPollster, self).setUp() self.pollster = lbaas.LBPoolPollster() fake_pools = self.fake_get_pools() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'INACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb02', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'UNKNOWN', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'error', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb_error', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] def test_pool_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_pools()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_pool_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(set(['network.services.lb.pool']), set([s.name for s in samples])) def test_pool_discovery(self): discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) self.assertEqual(4, len(discovered_pools)) for pool in self.fake_get_pools(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_pools) else: self.assertIn(pool, discovered_pools) class TestLBVipPollster(_BaseTestLBPollster): def setUp(self): super(TestLBVipPollster, self).setUp() self.pollster = lbaas.LBVipPollster() fake_vips = self.fake_get_vips() 
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'vip_get_all', return_value=fake_vips)) @staticmethod def fake_get_vips(): return [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, {'status': 'INACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.3', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip02'}, {'status': 'PENDING_CREATE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.4', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'UNKNOWN', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'error', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip_error'}, ] def test_vip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_vips()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(set(['network.services.lb.vip']), set([s.name for s in samples])) def test_vip_discovery(self): discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) self.assertEqual(4, len(discovered_vips)) for pool in self.fake_get_vips(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_vips) else: 
self.assertIn(pool, discovered_vips) class TestLBMemberPollster(_BaseTestLBPollster): def setUp(self): super(TestLBMemberPollster, self).setUp() self.pollster = lbaas.LBMemberPollster() fake_members = self.fake_get_members() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'member_get_all', return_value=fake_members)) @staticmethod def fake_get_members(): return [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'INACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.5', 'status_description': None, 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'PENDING_CREATE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'UNKNOWN', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'error', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, ] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_members()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(set(['network.services.lb.member']), set([s.name for s in samples])) def test_members_discovery(self): discovered_members = discovery.LBMembersDiscovery().discover( self.manager) self.assertEqual(4, len(discovered_members)) for pool in self.fake_get_members(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_members) else: self.assertIn(pool, discovered_members) class TestLBHealthProbePollster(_BaseTestLBPollster): def setUp(self): super(TestLBHealthProbePollster, self).setUp() self.pollster = lbaas.LBHealthMonitorPollster() fake_health_monitor = self.fake_get_health_monitor() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
'health_monitor_get_all', return_value=fake_health_monitor)) @staticmethod def fake_get_health_monitor(): return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_health_monitor()[0][field], samples[0].resource_metadata[field]) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(set(['network.services.lb.health_monitor']), set([s.name for s in samples])) def test_probes_discovery(self): discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( self.manager) self.assertEqual(discovered_probes, self.fake_get_health_monitor()) class TestLBStatsPollster(_BaseTestLBPollster): def setUp(self): super(TestLBStatsPollster, self).setUp() fake_pool_stats = self.fake_pool_stats() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'pool_stats', return_value=fake_pool_stats)) fake_pools = self.fake_get_pools() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] @staticmethod def fake_pool_stats(): return {'stats': {'active_connections': 2, 'bytes_in': 1, 'bytes_out': 3, 'total_connections': 4 } } @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, sample_name, expected_volume, expected_type): pollster = factory() cache = {} samples = list(pollster.get_samples(self.manager, cache, self.fake_get_pools())) self.assertEqual(1, len(samples)) self.assertIsNotNone(samples) self.assertIn('lbstats', cache) self.assertEqual(set([sample_name]), set([s.name for s in samples])) match = [s for s in samples if s.name == sample_name] self.assertEqual(1, len(match), 'missing counter %s' % sample_name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(expected_type, match[0].type) def test_lb_total_connections(self): self._check_get_samples(lbaas.LBTotalConnectionsPollster, 'network.services.lb.total.connections', 4, 'cumulative') def test_lb_active_connections(self): self._check_get_samples(lbaas.LBActiveConnectionsPollster, 'network.services.lb.active.connections', 2, 'gauge') def test_lb_incoming_bytes(self): self._check_get_samples(lbaas.LBBytesInPollster, 'network.services.lb.incoming.bytes', 1, 'gauge') def test_lb_outgoing_bytes(self): self._check_get_samples(lbaas.LBBytesOutPollster, 'network.services.lb.outgoing.bytes', 3, 'gauge') ceilometer-6.0.0/ceilometer/tests/unit/network/services/test_lbaas_v2.py0000664000567000056710000003210412701406223027672 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas class _BaseTestLBPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.context = context.get_admin_context() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBListenerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBListenerPollster, self).setUp() self.pollster = lbaas.LBListenerPollster() self.pollster.lb_version = 'v2' fake_listeners = self.fake_list_listeners() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'list_listener', return_value=fake_listeners)) @staticmethod def fake_list_listeners(): return [{'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'a9729389-6147-41a3-ab22-a24aed8692b2'}], 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'mylistener_online', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ONLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a'}], 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylistener_offline', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'OFFLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_error', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ERROR'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_pending_create', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'PENDING_CREATE'} ] def test_listener_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_listeners()[0][field], samples[0].resource_metadata[field]) def test_listener_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, 
samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_listener_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(set(['network.services.lb.listener']), set([s.name for s in samples])) def test_listener_discovery(self): discovered_listeners = discovery.LBListenersDiscovery().discover( self.manager) self.assertEqual(4, len(discovered_listeners)) for listener in self.fake_list_listeners(): if listener['operating_status'] == 'pending_create': self.assertNotIn(listener, discovered_listeners) else: self.assertIn(listener, discovered_listeners) class TestLBLoadBalancerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBLoadBalancerPollster, self).setUp() self.pollster = lbaas.LBLoadBalancerPollster() self.pollster.lb_version = 'v2' fake_loadbalancers = self.fake_list_loadbalancers() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'list_loadbalancer', return_value=fake_loadbalancers)) @staticmethod def fake_list_loadbalancers(): return [{'operating_status': 'ONLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'ACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'vip_address': '10.0.0.2', 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_online'}, {'operating_status': 'OFFLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a'}], 'vip_address': '10.0.0.3', 'vip_subnet_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'loadbalancer_offline'}, {'operating_status': 'ERROR', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d8b'}], 'vip_address': '10.0.0.4', 'vip_subnet_id': '213d3059-87a4-45a5-91e9-d721068df0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_error'}, {'operating_status': 'PENDING_CREATE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d4ed7c'}], 'vip_address': '10.0.0.5', 'vip_subnet_id': '123d3059-87a4-45a5-91e9-d721068ae0c3', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395763b2', 'name': 'loadbalancer_pending_create'} ] def test_loadbalancer_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_loadbalancers()[0][field], samples[0].resource_metadata[field]) def test_loadbalancer_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_loadbalancer_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(set(['network.services.lb.loadbalancer']), set([s.name for s in samples])) def test_loadbalancer_discovery(self): discovered_loadbalancers = \ discovery.LBLoadBalancersDiscovery().discover(self.manager) 
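        # The discovery plugin returns everything the client lists; note
        # that the fakes use upper-case statuses, so the lower-case
        # 'pending_create' guard below never actually excludes an entry.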
        self.assertEqual(4, len(discovered_loadbalancers))
        for loadbalancer in self.fake_list_loadbalancers():
            if loadbalancer['operating_status'] == 'pending_create':
                self.assertNotIn(loadbalancer, discovered_loadbalancers)
            else:
                self.assertIn(loadbalancer, discovered_loadbalancers)


class TestLBStatsPollster(_BaseTestLBPollster):

    def setUp(self):
        super(TestLBStatsPollster, self).setUp()
        fake_balancer_stats = self.fake_balancer_stats()
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'get_loadbalancer_stats',
                                        return_value=fake_balancer_stats))
        fake_loadbalancers = self.fake_list_loadbalancers()
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'list_loadbalancer',
                                        return_value=fake_loadbalancers))
        cfg.CONF.set_override('neutron_lbaas_version',
                              'v2',
                              group='service_types')

    @staticmethod
    def fake_list_loadbalancers():
        return [{'operating_status': 'ONLINE',
                 'description': '',
                 'admin_state_up': True,
                 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c',
                 'provisioning_status': 'ACTIVE',
                 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}],
                 'vip_address': '10.0.0.2',
                 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2',
                 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                 'name': 'loadbalancer_online'},
                ]

    @staticmethod
    def fake_balancer_stats():
        return {'active_connections': 2,
                'bytes_in': 1,
                'bytes_out': 3,
                'total_connections': 4}

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, sample_name, expected_volume,
                           expected_type):
        pollster = factory()

        cache = {}
        samples = list(pollster.get_samples(self.manager, cache,
                                            self.fake_list_loadbalancers()))
        self.assertEqual(1, len(samples))
        self.assertIsNotNone(samples)
        self.assertIn('lbstats', cache)
        self.assertEqual(set([sample_name]), set([s.name for s in samples]))

        match = [s for s in samples if s.name == sample_name]
        self.assertEqual(1, len(match), 'missing counter %s' % sample_name)
        self.assertEqual(expected_volume, match[0].volume)
        self.assertEqual(expected_type, match[0].type)

    def test_lb_total_connections(self):
        self._check_get_samples(lbaas.LBTotalConnectionsPollster,
                                'network.services.lb.total.connections',
                                4, 'cumulative')

    def test_lb_active_connections(self):
        self._check_get_samples(lbaas.LBActiveConnectionsPollster,
                                'network.services.lb.active.connections',
                                2, 'gauge')

    def test_lb_incoming_bytes(self):
        self._check_get_samples(lbaas.LBBytesInPollster,
                                'network.services.lb.incoming.bytes',
                                1, 'gauge')

    def test_lb_outgoing_bytes(self):
        self._check_get_samples(lbaas.LBBytesOutPollster,
                                'network.services.lb.outgoing.bytes',
                                3, 'gauge')
ceilometer-6.0.0/ceilometer/tests/unit/network/services/test_vpnaas.py0000664000567000056710000001646212701406223027502 0ustar jenkinsjenkins00000000000000#
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
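
# The pollster tests in this module all follow the same pattern: stub
# out the Neutron client call that lists VPN resources, run the
# pollster's get_samples(), and assert on the emitted sample names,
# volumes and resource metadata.  A sample's volume encodes the
# resource status as a small integer (the exact mapping is pinned down
# by the *_volume tests below), and resources in an error state are
# expected to be filtered out of discovery.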
import mock
from oslo_context import context
from oslotest import base
from oslotest import mockpatch

from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.network.services import discovery
from ceilometer.network.services import vpnaas


class _BaseTestVPNPollster(base.BaseTestCase):

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        super(_BaseTestVPNPollster, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self.manager = manager.AgentManager()
        plugin_base._get_keystone = mock.Mock()
        catalog = (plugin_base._get_keystone.session.auth.get_access.
                   return_value.service_catalog)
        catalog.get_endpoints = mock.MagicMock(
            return_value={'network': mock.ANY})


class TestVPNServicesPollster(_BaseTestVPNPollster):

    def setUp(self):
        super(TestVPNServicesPollster, self).setUp()
        self.pollster = vpnaas.VPNServicesPollster()
        fake_vpn = self.fake_get_vpn_service()
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'vpn_get_all',
                                        return_value=fake_vpn))

    @staticmethod
    def fake_get_vpn_service():
        return [{'status': 'ACTIVE',
                 'name': 'myvpn',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
                {'status': 'INACTIVE',
                 'name': 'myvpn',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
                {'status': 'PENDING_CREATE',
                 'name': 'myvpn',
                 'description': '',
                 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'admin_state_up': True,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
                {'status': 'error',
                 'name': 'myvpn',
                 'description': '',
                 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'admin_state_up': False,
                 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
                ]

    def test_vpn_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vpn_service()))
        self.assertEqual(3, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_vpn_service()[0][field],
                             samples[0].resource_metadata[field])

    def test_vpn_volume(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vpn_service()))
        self.assertEqual(1, samples[0].volume)
        self.assertEqual(0, samples[1].volume)
        self.assertEqual(2, samples[2].volume)

    def test_get_vpn_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_vpn_service()))
        self.assertEqual(set(['network.services.vpn']),
                         set([s.name for s in samples]))

    def test_vpn_discovery(self):
        discovered_vpns = discovery.VPNServicesDiscovery().discover(
            self.manager)
        self.assertEqual(3, len(discovered_vpns))
        for vpn in self.fake_get_vpn_service():
            if vpn['status'] == 'error':
                self.assertNotIn(vpn, discovered_vpns)
            else:
                self.assertIn(vpn, discovered_vpns)


class TestIPSecConnectionsPollster(_BaseTestVPNPollster):

    def setUp(self):
        super(TestIPSecConnectionsPollster, self).setUp()
        self.pollster = vpnaas.IPSecConnectionsPollster()
        fake_conns = self.fake_get_ipsec_connections()
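        # Stub the Neutron client so the pollster sees the canned
        # connection list above instead of hitting a real endpoint.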
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'ipsec_site_connections_get_all',
                                        return_value=fake_conns))

    @staticmethod
    def fake_get_ipsec_connections():
        return [{'name': 'connection1',
                 'description': 'Remote-connection1',
                 'peer_address': '192.168.1.10',
                 'peer_id': '192.168.1.10',
                 'peer_cidrs': ['192.168.2.0/24',
                                '192.168.3.0/24'],
                 'mtu': 1500,
                 'psk': 'abcd',
                 'initiator': 'bi-directional',
                 'dpd': {
                     'action': 'hold',
                     'interval': 30,
                     'timeout': 120},
                 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a',
                 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a',
                 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a',
                 'admin_state_up': True,
                 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a',
                 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'}
                ]

    def test_conns_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_ipsec_connections()))
        self.assertEqual(1, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_ipsec_connections()[0][field],
                             samples[0].resource_metadata[field])

    def test_get_conns_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_ipsec_connections()))
        self.assertEqual(set(['network.services.vpn.connections']),
                         set([s.name for s in samples]))

    def test_conns_discovery(self):
        discovered_conns = discovery.IPSecConnectionsDiscovery().discover(
            self.manager)
        self.assertEqual(1, len(discovered_conns))
        self.assertEqual(self.fake_get_ipsec_connections(),
                         discovered_conns)
ceilometer-6.0.0/ceilometer/tests/unit/network/services/test_fwaas.py0000664000567000056710000001577112701406223027315 0ustar jenkinsjenkins00000000000000#
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_context import context
from oslotest import base
from oslotest import mockpatch

from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.network.services import discovery
from ceilometer.network.services import fwaas


class _BaseTestFWPollster(base.BaseTestCase):

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        super(_BaseTestFWPollster, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self.manager = manager.AgentManager()
        plugin_base._get_keystone = mock.Mock()
        catalog = (plugin_base._get_keystone.session.auth.get_access.
                   return_value.service_catalog)
        catalog.get_endpoints = mock.MagicMock(
            return_value={'network': mock.ANY})


class TestFirewallPollster(_BaseTestFWPollster):

    def setUp(self):
        super(TestFirewallPollster, self).setUp()
        self.pollster = fwaas.FirewallPollster()
        fake_fw = self.fake_get_fw_service()
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'firewall_get_all',
                                        return_value=fake_fw))

    @staticmethod
    def fake_get_fw_service():
        return [{'status': 'ACTIVE',
                 'name': 'myfw',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'},
                {'status': 'INACTIVE',
                 'name': 'myfw',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'},
                {'status': 'PENDING_CREATE',
                 'name': 'myfw',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'},
                {'status': 'error',
                 'name': 'myfw',
                 'description': '',
                 'admin_state_up': True,
                 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
                 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'},
                ]

    def test_fw_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_fw_service()))
        self.assertEqual(3, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_fw_service()[0][field],
                             samples[0].resource_metadata[field])

    def test_fw_volume(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_fw_service()))
        self.assertEqual(1, samples[0].volume)
        self.assertEqual(0, samples[1].volume)
        self.assertEqual(2, samples[2].volume)

    def test_get_fw_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_fw_service()))
        self.assertEqual(set(['network.services.firewall']),
                         set([s.name for s in samples]))

    def test_fw_discovery(self):
        discovered_fws = discovery.FirewallDiscovery().discover(self.manager)
        self.assertEqual(3, len(discovered_fws))
        for fw in self.fake_get_fw_service():
            if fw['status'] == 'error':
                self.assertNotIn(fw, discovered_fws)
            else:
                self.assertIn(fw, discovered_fws)


class TestFirewallPolicyPollster(_BaseTestFWPollster):

    def setUp(self):
        super(TestFirewallPolicyPollster, self).setUp()
        self.pollster = fwaas.FirewallPolicyPollster()
        fake_fw_policy = self.fake_get_fw_policy()
        self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
                                        'fw_policy_get_all',
                                        return_value=fake_fw_policy))

    @staticmethod
    def fake_get_fw_policy():
        return [{'name': 'my_fw_policy',
                 'description': 'fw_policy',
                 'admin_state_up': True,
                 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a',
                 'firewall_rules': [{'enabled': True,
                                     'action': 'allow',
                                     'ip_version': 4,
                                     'protocol': 'tcp',
                                     'destination_port': '80',
                                     'source_ip_address': '10.24.4.2'},
                                    {'enabled': True,
                                     'action': 'deny',
                                     'ip_version': 4,
                                     'protocol': 'tcp',
                                     'destination_port': '22'}],
                 'shared': True,
                 'audited': True,
                 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'}
                ]

    def test_policy_get_samples(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_fw_policy()))
        self.assertEqual(1, len(samples))
        for field in self.pollster.FIELDS:
            self.assertEqual(self.fake_get_fw_policy()[0][field],
                             samples[0].resource_metadata[field])

    def test_get_policy_meter_names(self):
        samples = list(self.pollster.get_samples(
            self.manager, {},
            resources=self.fake_get_fw_policy()))
        self.assertEqual(set(['network.services.firewall.policy']),
                         set([s.name for s in samples]))

    def test_fw_policy_discovery(self):
        discovered_policy = discovery.FirewallPolicyDiscovery().discover(
            self.manager)
        self.assertEqual(1, len(discovered_policy))
        self.assertEqual(self.fake_get_fw_policy(), discovered_policy)
ceilometer-6.0.0/ceilometer/tests/unit/network/test_notifications.py0000664000567000056710000017514012701406223027237 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
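
# The handler classes exercised below each consume a canned Neutron
# notification payload (one of the NOTIFICATION_* constants in this
# module) and yield Ceilometer samples.  A minimal sketch of the
# pattern used throughout, shown here with the network-create fixture:
#
#     handler = notifications.Network(mock.Mock())
#     samples = list(handler.process_notification(
#         NOTIFICATION_NETWORK_CREATE))
#     # one "network" existence sample plus one "network.create" sample
#     assert [s.name for s in samples] == ['network', 'network.create']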
"""Tests for ceilometer.network.notifications """ import mock from ceilometer.network import notifications from ceilometer.tests import base as test NOTIFICATION_NETWORK_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'network.create.end', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'network': {u'status': u'ACTIVE', u'subnets': [], u'name': u'abcedf', u'router:external': False, u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'admin_state_up': True, u'shared': False, u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_BULK_NETWORK_CREATE = { '_context_roles': [u'_member_', u'heat_stack_owner', u'admin'], u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', u'_context_read_deleted': u'no', u'event_type': u'network.create.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2014-05-1510: 24: 56.335612', u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_tenant_name': u'admin', u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', u'priority': 'info', u'_context_is_admin': True, u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_timestamp': u'2014-05-1510: 24: 56.285975', u'_context_user': u'7520940056d54cceb25cbce888300bea', u'_context_user_id': u'7520940056d54cceb25cbce888300bea', u'publisher_id': u'network.devstack', u'payload': { u'networks': [{u'status': u'ACTIVE', u'subnets': [], u'name': u'test2', u'provider: physical_network': None, u'admin_state_up': True, u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'provider: network_type': u'local', u'shared': False, u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', u'provider: segmentation_id': None}, {u'status': u'ACTIVE', u'subnets': [], u'name': u'test3', u'provider: physical_network': None, u'admin_state_up': True, u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'provider: network_type': u'local', u'shared': False, u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', u'provider: segmentation_id': None}] } } NOTIFICATION_SUBNET_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'subnet.create.end', u'timestamp': u'2012-09-27 14:11:27.426620', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'subnet': { u'name': u'mysubnet', u'enable_dhcp': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'dns_nameservers': [], u'allocation_pools': [{u'start': u'192.168.42.2', u'end': u'192.168.42.254'}], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.42.1', u'cidr': u'192.168.42.0/24', u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:27.214490', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} NOTIFICATION_BULK_SUBNET_CREATE = { '_context_roles': [u'_member_', u'heat_stack_owner', u'admin'], u'_context_request_id': 
u'req-b77e278a-0cce-4987-9f82-15957b234768', u'_context_read_deleted': u'no', u'event_type': u'subnet.create.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2014-05-1510: 47: 08.133888', u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_tenant_name': u'admin', u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', u'priority': u'info', u'_context_is_admin': True, u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', u'_context_timestamp': u'2014-05-1510: 47: 07.970043', u'_context_user': u'7520940056d54cceb25cbce888300bea', u'_context_user_id': u'7520940056d54cceb25cbce888300bea', u'publisher_id': u'network.devstack', u'payload': { u'subnets': [{u'name': u'', u'enable_dhcp': True, u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'dns_nameservers': [], u'ipv6_ra_mode': None, u'allocation_pools': [{u'start': u'10.0.4.2', u'end': u'10.0.4.254'}], u'host_routes': [], u'ipv6_address_mode': None, u'ip_version': 4, u'gateway_ip': u'10.0.4.1', u'cidr': u'10.0.4.0/24', u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, {u'name': u'', u'enable_dhcp': True, u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', u'dns_nameservers': [], u'ipv6_ra_mode': None, u'allocation_pools': [{u'start': u'10.0.5.2', u'end': u'10.0.5.254'}], u'host_routes': [], u'ipv6_address_mode': None, u'ip_version': 4, u'gateway_ip': u'10.0.5.1', u'cidr': u'10.0.5.0/24', u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] } } NOTIFICATION_PORT_CREATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'port.create.end', u'timestamp': u'2012-09-27 14:28:31.536370', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'port': { u'status': u'ACTIVE', u'name': u'', u'admin_state_up': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'device_owner': u'', u'mac_address': u'fa:16:3e:75:0c:49', u'fixed_ips': [{ u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', u'ip_address': u'192.168.42.3'}], u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', u'device_id': u''}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:28:31.438919', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} NOTIFICATION_BULK_PORT_CREATE = { u'_context_roles': [u'_member_', u'SwiftOperator'], u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', u'_context_read_deleted': u'no', u'event_type': u'port.create.end', u'_context_project_name': u'demo', u'timestamp': u'2014-05-0909: 19: 58.317548', u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', u'_context_timestamp': u'2014-05-0909: 19: 58.160011', u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', u'payload': { u'ports': [{u'status': u'DOWN', u'name': u'port--1501135095', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', u'tenant_id': u'133087d90fc149528b501dd8b75ea965', u'binding: vnic_type': u'normal', u'device_owner': u'', u'mac_address': u'fa: 16: 3e: 37: 10: 39', u'fixed_ips': [], u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', u'security_groups': [ u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], u'device_id': u''}, 
{u'status': u'DOWN', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': False, u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', u'tenant_id': u'133087d90fc149528b501dd8b75ea965', u'binding: vnic_type': u'normal', u'device_owner': u'', u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', u'fixed_ips': [], u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', u'security_groups': [ u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], u'device_id': u''}] }, u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', u'_context_is_admin': False, u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', u'_context_tenant_name': u'demo', u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', u'_context_user_name': u'demo', u'publisher_id': u'network.os-ci-test12', u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', u'priority': u'INFO' } NOTIFICATION_PORT_UPDATE = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'port.update.end', u'timestamp': u'2012-09-27 14:35:09.514052', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': { u'port': { u'status': u'ACTIVE', u'name': u'bonjour', u'admin_state_up': True, u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'device_owner': u'', u'mac_address': u'fa:16:3e:75:0c:49', u'fixed_ips': [{ u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', u'ip_address': u'192.168.42.3'}], u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', u'device_id': u''}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:35:09.447682', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} NOTIFICATION_NETWORK_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'network.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'network': {u'status': u'ACTIVE', u'subnets': [], u'name': u'abcedf', u'router:external': False, u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'admin_state_up': True, u'shared': False, u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_ROUTER_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': u'router.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'router': {'status': u'ACTIVE', 'external_gateway_info': {'network_id': u'89d55642-4dec-43a4-a617-6cec051393b5'}, 'name': u'router1', 'admin_state_up': True, 'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd', 'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_FLOATINGIP_EXISTS = { u'_context_roles': [u'anotherrole', u'Member'], u'_context_read_deleted': u'no', u'event_type': 
u'floatingip.exists', u'timestamp': u'2012-09-27 14:11:27.086575', u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', u'payload': {u'floatingip': {'router_id': None, 'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd', 'floating_network_id': u'001400f7-1710-4245-98c3-39ba131cc39a', 'fixed_ip_address': None, 'floating_ip_address': u'172.24.4.227', 'port_id': None, 'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}}, u'priority': u'INFO', u'_context_is_admin': False, u'_context_timestamp': u'2012-09-27 14:11:26.924779', u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', u'publisher_id': u'network.ubuntu-VirtualBox', u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} NOTIFICATION_FLOATINGIP_UPDATE_START = { '_context_roles': [u'_member_', u'admin', u'heat_stack_owner'], '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced', '_context_read_deleted': u'no', 'event_type': u'floatingip.update.start', '_context_user_name': u'admin', '_context_project_name': u'admin', 'timestamp': u'2014-05-3107: 19: 43.463101', '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732', '_context_tenant_name': u'admin', '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732', 'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a', 'priority': 'info', '_context_is_admin': True, '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732', '_context_timestamp': u'2014-05-3107: 19: 43.460767', '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1', '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1', 'publisher_id': u'network.devstack', 'payload': { u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555', u'floatingip': { u'fixed_ip_address': u'172.24.4.227', u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2' } } } NOTIFICATION_POOL_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", "event_type": "pool.create.end", "timestamp": "2014-09-15 17:20:50.687649", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "ce255443233748ce9cc71b480974df28", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "pool": { "status": "ACTIVE", "lb_method": "ROUND_ROBIN", "protocol": "HTTP", "description": "", "health_monitors": [], "members": [], "status_description": None, "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "vip_id": None, "name": "my_pool", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "health_monitors_status": [], "provider": "haproxy"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:20:49.600299", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} NOTIFICATION_VIP_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vip.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": 
"1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vip": { "status": "ACTIVE", "protocol": "HTTP", "description": "", "address": "10.0.0.2", "protocol_port": 80, "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", "status_description": None, "name": "my_vip", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "connection_limit": -1, "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "session_persistence": {"type": "SOURCE_IP"}}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} NOTIFICATION_HEALTH_MONITORS_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": { "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "delay": 10, "max_retries": 10, "timeout": 10, "pools": [], "type": "PING", "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_MEMBERS_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "member.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "member": {"admin_state_up": True, "status": "ACTIVE", "status_description": None, "weight": 1, "address": "10.0.0.3", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "protocol_port": 80, "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_FIREWALL_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": 
"req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall": { "status": "ACTIVE", "name": "my_firewall", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_RULE_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_rule.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_rule": { "protocol": "tcp", "description": "", "source_port": 80, "source_ip_address": '192.168.255.10', "destination_ip_address": '10.10.10.1', "firewall_policy_id": '', "position": None, "destination_port": 80, "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", "name": "rule_01", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "enabled": True, "action": "allow", "ip_version": 4, "shared": False}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_policy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_policy": {"name": "my_policy", "firewall_rules": [], "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "audited": False, "shared": False, "id": "c46a1c15-0496-41c9-beff-9a309a25653e", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", 
"publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_VPNSERVICE_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vpnservice.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", "status": "ACTIVE", "name": "my_vpn", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_IPSEC_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsecpolicy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsecpolicy": {"encapsulation_mode": "tunnel", "encryption_algorithm": "aes-128", "pfs": "group5", "lifetime": { "units": "seconds", "value": 3600}, "name": "my_ipsec_polixy", "transform_protocol": "esp", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IKE_POLICY_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ikepolicy.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ikepolicy": {"encryption_algorithm": "aes-128", "pfs": "group5", "name": "my_ike_policy", "phase1_negotiation_mode": "main", "lifetime": {"units": "seconds", "value": 3600}, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ike_version": "v1", "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", 
"_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IPSEC_SITE_CONN_CREATE = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsec_site_connection.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsec_site_connection": { "status": "ACTIVE", "psk": "test", "initiator": "bi-directional", "name": "my_ipsec_connection", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], "mtu": 1500, "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "dpd": {"action": "hold", "interval": 30, "timeout": 120}, "route_mode": "static", "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "peer_address": "10.0.0.1", "peer_id": "10.0.0.254", "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_POOL_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", "event_type": "pool.update.end", "timestamp": "2014-09-15 17:20:50.687649", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "ce255443233748ce9cc71b480974df28", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "pool": { "status": "ACTIVE", "lb_method": "ROUND_ROBIN", "protocol": "HTTP", "description": "", "health_monitors": [], "members": [], "status_description": None, "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "vip_id": None, "name": "my_pool", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "health_monitors_status": [], "provider": "haproxy"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:20:49.600299", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} NOTIFICATION_VIP_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vip.update.end", "timestamp": "2014-09-15 17:22:11.323644", 
"_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vip": { "status": "ACTIVE", "protocol": "HTTP", "description": "", "address": "10.0.0.2", "protocol_port": 80, "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", "status_description": None, "name": "my_vip", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "connection_limit": -1, "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", "session_persistence": {"type": "SOURCE_IP"}}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} NOTIFICATION_HEALTH_MONITORS_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": { "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "delay": 10, "max_retries": 10, "timeout": 10, "pools": [], "type": "PING", "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_MEMBERS_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "member.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "member": {"admin_state_up": True, "status": "ACTIVE", "status_description": None, "weight": 1, "address": "10.0.0.3", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "protocol_port": 80, "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": 
"65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_FIREWALL_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall": { "status": "ACTIVE", "name": "my_firewall", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_RULE_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_rule.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_rule": { "protocol": "tcp", "description": "", "source_port": 80, "source_ip_address": '192.168.255.10', "destination_ip_address": '10.10.10.1', "firewall_policy_id": '', "position": None, "destination_port": 80, "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", "name": "rule_01", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "enabled": True, "action": "allow", "ip_version": 4, "shared": False}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_FIREWALL_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "firewall_policy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "firewall_policy": {"name": "my_policy", "firewall_rules": [], "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "audited": False, "shared": False, "id": "c46a1c15-0496-41c9-beff-9a309a25653e", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", 
"_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} NOTIFICATION_VPNSERVICE_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "vpnservice.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", "status": "ACTIVE", "name": "my_vpn", "admin_state_up": True, "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} NOTIFICATION_IPSEC_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsecpolicy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsecpolicy": {"encapsulation_mode": "tunnel", "encryption_algorithm": "aes-128", "pfs": "group5", "lifetime": { "units": "seconds", "value": 3600}, "name": "my_ipsec_polixy", "transform_protocol": "esp", "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IKE_POLICY_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ikepolicy.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ikepolicy": {"encryption_algorithm": "aes-128", "pfs": "group5", "name": "my_ike_policy", "phase1_negotiation_mode": "main", "lifetime": {"units": "seconds", "value": 3600}, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ike_version": "v1", "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "auth_algorithm": "sha1", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": 
"no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { "_context_roles": ["admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "ipsec_site_connection.update.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "ipsec_site_connection": { "status": "ACTIVE", "psk": "test", "initiator": "bi-directional", "name": "my_ipsec_connection", "admin_state_up": True, "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], "mtu": 1500, "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", "dpd": {"action": "hold", "interval": 30, "timeout": 120}, "route_mode": "static", "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", "peer_address": "10.0.0.1", "peer_id": "10.0.0.254", "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", "description": ""}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} NOTIFICATION_EMPTY_PAYLOAD = { "_context_roles": ["heat_stack_owner", "admin"], "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", "event_type": "health_monitor.create.end", "timestamp": "2014-09-15 17:22:11.323644", "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", "_context_tenant_name": "demo", "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", "payload": { "health_monitor": {}}, "_context_project_name": "demo", "_context_read_deleted": "no", "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", "priority": "INFO", "_context_is_admin": True, "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", "_context_timestamp": "2014-09-15 17:22:11.187163", "_context_user_name": "admin", "publisher_id": "network.ubuntu", "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} class TestNotifications(test.BaseTestCase): def test_network_create(self): v = notifications.Network(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.create", samples[1].name) def test_bulk_network_create(self): v = notifications.Network(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_BULK_NETWORK_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("network", samples[0].name) self.assertEqual("network.create", samples[1].name) self.assertEqual("network", samples[2].name) 
self.assertEqual("network.create", samples[3].name) def test_subnet_create(self): v = notifications.Subnet(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("subnet.create", samples[1].name) def test_bulk_subnet_create(self): v = notifications.Subnet(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("subnet", samples[0].name) self.assertEqual("subnet.create", samples[1].name) self.assertEqual("subnet", samples[2].name) self.assertEqual("subnet.create", samples[3].name) def test_port_create(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_PORT_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("port.create", samples[1].name) def test_bulk_port_create(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE)) self.assertEqual(4, len(samples)) self.assertEqual("port", samples[0].name) self.assertEqual("port.create", samples[1].name) self.assertEqual("port", samples[2].name) self.assertEqual("port.create", samples[3].name) def test_port_update(self): v = notifications.Port(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("port.update", samples[1].name) def test_network_exists(self): v = notifications.Network(mock.Mock()) samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS) self.assertEqual(1, len(list(samples))) def test_router_exists(self): v = notifications.Router(mock.Mock()) samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS) self.assertEqual(1, len(list(samples))) def test_floatingip_exists(self): v = notifications.FloatingIP(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS)) self.assertEqual(1, len(samples)) self.assertEqual("ip.floating", samples[0].name) def test_floatingip_update(self): v = notifications.FloatingIP(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FLOATINGIP_UPDATE_START)) self.assertEqual(len(samples), 2) self.assertEqual("ip.floating", samples[0].name) def test_pool_create(self): v = notifications.Pool(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_POOL_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.pool", samples[0].name) def test_vip_create(self): v = notifications.Vip(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VIP_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.vip", samples[0].name) def test_member_create(self): v = notifications.Member(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.member", samples[0].name) def test_health_monitor_create(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_HEALTH_MONITORS_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.health_monitor", samples[0].name) def test_firewall_create(self): v = notifications.Firewall(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall", samples[0].name) def test_vpnservice_create(self): v = notifications.VPNService(mock.Mock()) samples = 
list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn", samples[0].name) def test_ipsec_connection_create(self): v = notifications.IPSecSiteConnection(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_SITE_CONN_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.connections", samples[0].name) def test_firewall_policy_create(self): v = notifications.FirewallPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.policy", samples[0].name) def test_firewall_rule_create(self): v = notifications.FirewallRule(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_RULE_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.rule", samples[0].name) def test_ipsec_policy_create(self): v = notifications.IPSecPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) def test_ike_policy_create(self): v = notifications.IKEPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IKE_POLICY_CREATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) def test_pool_update(self): v = notifications.Pool(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.pool", samples[0].name) def test_vip_update(self): v = notifications.Vip(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.vip", samples[0].name) def test_member_update(self): v = notifications.Member(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.member", samples[0].name) def test_health_monitor_update(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_HEALTH_MONITORS_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.lb.health_monitor", samples[0].name) def test_firewall_update(self): v = notifications.Firewall(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall", samples[0].name) def test_vpnservice_update(self): v = notifications.VPNService(mock.Mock()) samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn", samples[0].name) def test_ipsec_connection_update(self): v = notifications.IPSecSiteConnection(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_SITE_CONN_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.connections", samples[0].name) def test_firewall_policy_update(self): v = notifications.FirewallPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_FIREWALL_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.policy", samples[0].name) def test_firewall_rule_update(self): v = notifications.FirewallRule(mock.Mock()) samples = list(v.process_notification( 
NOTIFICATION_FIREWALL_RULE_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.firewall.rule", samples[0].name) def test_ipsec_policy_update(self): v = notifications.IPSecPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IPSEC_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) def test_ike_policy_update(self): v = notifications.IKEPolicy(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_IKE_POLICY_UPDATE)) self.assertEqual(2, len(samples)) self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) def test_empty_event_payload(self): v = notifications.HealthMonitor(mock.Mock()) samples = list(v.process_notification( NOTIFICATION_EMPTY_PAYLOAD)) self.assertEqual(0, len(samples)) class TestEventTypes(test.BaseTestCase): def test_network(self): v = notifications.Network(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_subnet(self): v = notifications.Subnet(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_port(self): v = notifications.Port(mock.Mock()) events = v.event_types self.assertIsNotEmpty(events) def test_router(self): self.assertTrue(notifications.Router(mock.Mock()).event_types) def test_floatingip(self): self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types) def test_pool(self): self.assertTrue(notifications.Pool(mock.Mock()).event_types) def test_vip(self): self.assertTrue(notifications.Vip(mock.Mock()).event_types) def test_member(self): self.assertTrue(notifications.Member(mock.Mock()).event_types) def test_health_monitor(self): self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types) def test_firewall(self): self.assertTrue(notifications.Firewall(mock.Mock()).event_types) def test_vpnservice(self): self.assertTrue(notifications.VPNService(mock.Mock()).event_types) def test_ipsec_connection(self): self.assertTrue(notifications.IPSecSiteConnection( mock.Mock()).event_types) def test_firewall_policy(self): self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types) def test_firewall_rule(self): self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types) def test_ipsec_policy(self): self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types) def test_ike_policy(self): self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types) ceilometer-6.0.0/ceilometer/tests/unit/network/test_floating_ip.py0000664000567000056710000001027712701406223026660 0ustar jenkinsjenkins00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
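# NOTE: The pollster tests below follow the usual pattern: patch the
# neutron client call the pollster depends on (fip_get_all here), run
# get_samples() against a fake endpoint, and assert on the returned
# Sample objects. Judging by the assertions (one sample from three
# fixture entries), only the floating IP with status ACTIVE is expected
# to yield a sample; the DOWN and error entries are filtered out.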
import mock from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.agent import plugin_base from ceilometer.network import floatingip class _BaseTestFloatingIPPollster(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestFloatingIPPollster, self).setUp() self.manager = manager.AgentManager() plugin_base._get_keystone = mock.Mock() class TestFloatingIPPollster(_BaseTestFloatingIPPollster): def setUp(self): super(TestFloatingIPPollster, self).setUp() self.pollster = floatingip.FloatingIPPollster() fake_fip = self.fake_get_fip_service() self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 'fip_get_all', return_value=fake_fip)) @staticmethod def fake_get_fip_service(): return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'ACTIVE', 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.6', 'floating_ip_address': '65.79.162.11', 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'DOWN', 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.7', 'floating_ip_address': '65.79.162.12', 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', 'status': 'error', 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.8', 'floating_ip_address': '65.79.162.13', 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}] def test_default_discovery(self): self.assertEqual('endpoint:network', self.pollster.default_discovery) def test_fip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=['http://localhost:9696/'])) self.assertEqual(1, len(samples)) self.assertEqual('18ca27bf-72bc-40c8-9c13-414d564ea367', samples[0].resource_id) self.assertEqual("65.79.162.11", samples[0].resource_metadata[ "floating_ip_address"]) self.assertEqual("10.0.0.6", samples[0].resource_metadata[ "fixed_ip_address"]) def test_fip_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=['http://localhost:9696/'])) self.assertEqual(1, samples[0].volume) def test_get_fip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=['http://localhost:9696/'])) self.assertEqual(set(['ip.floating']), set([s.name for s in samples])) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/0000775000567000056710000000000012701406364025145 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_driver.py0000664000567000056710000000207512701406223030047 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from ceilometer.network.statistics import driver class TestDriver(base.BaseTestCase): @staticmethod def test_driver_ok(): class OkDriver(driver.Driver): def get_sample_data(self, meter_name, resources, cache): pass OkDriver() def test_driver_ng(self): class NgDriver(driver.Driver): """get_sample_data method is lost.""" self.assertRaises(TypeError, NgDriver) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opencontrail/0000775000567000056710000000000012701406364027642 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py0000664000567000056710000002722512701406223032550 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from six.moves.urllib import parse as urlparse from ceilometer.network.statistics.opencontrail import driver class TestOpencontrailDriver(base.BaseTestCase): def setUp(self): super(TestOpencontrailDriver, self).setUp() self.nc_ports = mock.patch('ceilometer.neutron_client' '.Client.port_get_all', return_value=self.fake_ports()) self.nc_ports.start() self.driver = driver.OpencontrailDriver() self.parse_url = urlparse.ParseResult('opencontrail', '127.0.0.1:8143', '/', None, None, None) self.params = {'password': ['admin'], 'scheme': ['http'], 'username': ['admin'], 'verify_ssl': ['false'], 'resource': ['if_stats_list']} @staticmethod def fake_ports(): return [{'admin_state_up': True, 'device_owner': 'compute:None', 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] @staticmethod def fake_port_stats(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], "fip_stats_list": [{ "in_bytes": 33, "iface_name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442"), "out_bytes": 44, "out_pkts": 10, "virtual_network": "default-domain:openstack:public", "in_pkts": 11, "ip_address": "1.1.1.1" }] }}}]} @staticmethod def fake_port_stats_with_node(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [ [[{ "out_bytes": 22, 
"in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], 'node1'], [[{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 4, "in_pkts": 13, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], 'node2'] ] }}}]} def _test_meter(self, meter_name, expected, fake_port_stats=None): if not fake_port_stats: fake_port_stats = self.fake_port_stats() with mock.patch('ceilometer.network.' 'statistics.opencontrail.' 'client.NetworksAPIClient.' 'get_vm_statistics', return_value=fake_port_stats) as port_stats: samples = self.driver.get_sample_data(meter_name, self.parse_url, self.params, {}) self.assertEqual(expected, [s for s in samples]) port_stats.assert_called_with('*') def test_switch_port_receive_packets_with_node(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY), (13, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected, self.fake_port_stats_with_node()) def test_switch_port_receive_packets(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets(self): expected = [(5, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.packets', expected) def test_switch_port_receive_bytes(self): expected = [(23, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes(self): expected = [(22, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_receive_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(11, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 
'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(10, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.packets', expected) def test_switch_port_receive_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(33, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(44, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, mock.ANY)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_transmit_bytes_non_existing_network(self): self.params['virtual_network'] = ['aaa'] self.params['resource'] = ['fip_stats_list'] self._test_meter('switch.port.transmit.bytes', []) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py0000664000567000056710000000000012701406223031733 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py0000664000567000056710000000517312701406223032531 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
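# NOTE: These client tests never touch a real Contrail analytics
# endpoint; requests.get is replaced by a MagicMock, and the assertions
# inspect the recorded call to verify both the
# /analytics/uves/virtual-machine/<uuid> URL built by
# get_vm_statistics() and the merging of per-call parameters into the
# defaults passed to Client().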
import mock
from oslo_config import fixture as config_fixture
from oslotest import base

from ceilometer.network.statistics.opencontrail import client
from ceilometer import service as ceilometer_service


class TestOpencontrailClient(base.BaseTestCase):

    def setUp(self):
        super(TestOpencontrailClient, self).setUp()
        self.conf = self.useFixture(config_fixture.Config())
        ceilometer_service.prepare_service(argv=[], config_files=[])
        self.client = client.Client('http://127.0.0.1:8081',
                                    {'arg1': 'aaa'})

        self.get_resp = mock.MagicMock()
        self.get = mock.patch('requests.get',
                              return_value=self.get_resp).start()
        self.get_resp.raw.version = 1.1
        self.get_resp.status_code = 200
        self.get_resp.reason = 'OK'
        self.get_resp.content = ''

    def test_vm_statistics(self):
        self.client.networks.get_vm_statistics('bbb')

        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]

        expected_url = ('http://127.0.0.1:8081/analytics/'
                        'uves/virtual-machine/bbb')
        self.assertEqual(expected_url, call_args[0])

        data = call_kwargs.get('data')
        expected_data = {'arg1': 'aaa'}
        self.assertEqual(expected_data, data)

    def test_vm_statistics_params(self):
        self.client.networks.get_vm_statistics('bbb',
                                               {'resource': 'fip_stats_list',
                                                'virtual_network': 'ccc'})

        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]

        expected_url = ('http://127.0.0.1:8081/analytics/'
                        'uves/virtual-machine/bbb')
        self.assertEqual(expected_url, call_args[0])

        data = call_kwargs.get('data')
        expected_data = {'arg1': 'aaa',
                         'resource': 'fip_stats_list',
                         'virtual_network': 'ccc'}
        self.assertEqual(expected_data, data)
ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_statistics.py0000664000567000056710000001466212701406223030753 0ustar jenkinsjenkins00000000000000#
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
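# NOTE: statistics._Base is an abstract pollster: a subclass must define
# meter_name, meter_type and meter_unit, and instantiating one that
# omits any of them raises TypeError; that contract is what TestBase
# exercises below. TestBaseGetSamples then registers fake drivers in
# _Base.drivers keyed by URL scheme, so a resource URL such as
# 'http://foo' is dispatched to whichever driver was registered under
# 'http'.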
import datetime

from oslo_utils import timeutils
from oslotest import base

from ceilometer.network import statistics
from ceilometer.network.statistics import driver
from ceilometer import sample


class TestBase(base.BaseTestCase):

    @staticmethod
    def test_subclass_ok():
        class OkSubclass(statistics._Base):

            meter_name = 'foo'
            meter_type = sample.TYPE_GAUGE
            meter_unit = 'B'

        OkSubclass()

    def test_subclass_ng(self):
        class NgSubclass1(statistics._Base):
            """meter_name is missing."""

            meter_type = sample.TYPE_GAUGE
            meter_unit = 'B'

        class NgSubclass2(statistics._Base):
            """meter_type is missing."""

            meter_name = 'foo'
            meter_unit = 'B'

        class NgSubclass3(statistics._Base):
            """meter_unit is missing."""

            meter_name = 'foo'
            meter_type = sample.TYPE_GAUGE

        self.assertRaises(TypeError, NgSubclass1)
        self.assertRaises(TypeError, NgSubclass2)
        self.assertRaises(TypeError, NgSubclass3)


class TestBaseGetSamples(base.BaseTestCase):
    def setUp(self):
        super(TestBaseGetSamples, self).setUp()

        class FakePollster(statistics._Base):
            meter_name = 'foo'
            meter_type = sample.TYPE_CUMULATIVE
            meter_unit = 'bar'

        self.pollster = FakePollster()

    def tearDown(self):
        statistics._Base.drivers = {}
        super(TestBaseGetSamples, self).tearDown()

    @staticmethod
    def _setup_ext_mgr(**drivers):
        # Replace the driver registry (normally populated from an
        # extension manager, as the name suggests) with fakes keyed by
        # URL scheme.
        statistics._Base.drivers = drivers

    def _make_fake_driver(self, *return_values):
        # Each call to get_sample_data() yields the next canned tuple,
        # so a driver built from N tuples can serve N resources.
        class FakeDriver(driver.Driver):
            def __init__(self):
                self.index = 0

            def get_sample_data(self, meter_name, parse_url, params, cache):
                if self.index >= len(return_values):
                    yield None
                retval = return_values[self.index]
                self.index += 1
                yield retval
        return FakeDriver

    @staticmethod
    def _make_timestamps(count):
        now = timeutils.utcnow()
        return [(now + datetime.timedelta(seconds=i)).isoformat()
                for i in range(count)]

    def _get_samples(self, *resources):
        return [v for v in self.pollster.get_samples(self, {}, resources)]

    def _assert_sample(self, s, volume, resource_id, resource_metadata,
                       timestamp):
        self.assertEqual('foo', s.name)
        self.assertEqual(sample.TYPE_CUMULATIVE, s.type)
        self.assertEqual('bar', s.unit)
        self.assertEqual(volume, s.volume)
        self.assertIsNone(s.user_id)
        self.assertIsNone(s.project_id)
        self.assertEqual(resource_id, s.resource_id)
        self.assertEqual(timestamp, s.timestamp)
        self.assertEqual(resource_metadata, s.resource_metadata)

    def test_get_samples_one_driver_one_resource(self):
        times = self._make_timestamps(2)
        fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},
                                              times[0]),
                                             (2, 'b', None, times[1]))

        self._setup_ext_mgr(http=fake_driver())

        samples = self._get_samples('http://foo')

        self.assertEqual(1, len(samples))
        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0])

    def test_get_samples_one_driver_two_resource(self):
        times = self._make_timestamps(3)
        fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},
                                              times[0]),
                                             (2, 'b', None, times[1]),
                                             (3, 'c', None, times[2]))

        self._setup_ext_mgr(http=fake_driver())

        samples = self._get_samples('http://foo', 'http://bar')

        self.assertEqual(2, len(samples))
        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0])
        self._assert_sample(samples[1], 2, 'b', None, times[1])

    def test_get_samples_two_driver_one_resource(self):
        times = self._make_timestamps(4)
        fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'},
                                               times[0]),
                                              (2, 'b', None, times[1]))
        fake_driver2 = self._make_fake_driver((11, 'A', None, times[2]),
                                              (12, 'B', None, times[3]))

        self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2())

        samples = self._get_samples('http://foo')

        self.assertEqual(1, len(samples))
        self._assert_sample(samples[0],
1, 'a', {'spam': 'egg'}, times[0]) def test_get_samples_multi_samples(self): times = self._make_timestamps(2) fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'}, times[0]), (2, 'b', None, times[1])]) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo') self.assertEqual(2, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, times[0]) self._assert_sample(samples[1], 2, 'b', None, times[1]) def test_get_samples_return_none(self): fake_driver = self._make_fake_driver(None) self._setup_ext_mgr(http=fake_driver()) samples = self._get_samples('http://foo') self.assertEqual(0, len(samples)) def test_get_samples_return_no_generator(self): class NoneFakeDriver(driver.Driver): def get_sample_data(self, meter_name, parse_url, params, cache): return None self._setup_ext_mgr(http=NoneFakeDriver()) samples = self._get_samples('http://foo') self.assertFalse(samples) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/__init__.py0000664000567000056710000000174412701406223027256 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class _PollsterTestBase(base.BaseTestCase): def _test_pollster(self, pollster_class, meter_name, meter_type, meter_unit): pollster = pollster_class() self.assertEqual(pollster.meter_name, meter_name) self.assertEqual(pollster.meter_type, meter_type) self.assertEqual(pollster.meter_unit, meter_unit) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_table.py0000664000567000056710000000315112701406223027637 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
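# NOTE: _test_pollster() from this package's __init__.py (above) simply
# instantiates the pollster class and compares its meter_name,
# meter_type and meter_unit attributes, so each test below is a
# one-call metadata check for a single pollster.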
from ceilometer.network.statistics import table from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestTablePollsters(statistics._PollsterTestBase): def test_table_pollster(self): self._test_pollster( table.TablePollster, 'switch.table', sample.TYPE_GAUGE, 'table') def test_table_pollster_active_entries(self): self._test_pollster( table.TablePollsterActiveEntries, 'switch.table.active.entries', sample.TYPE_GAUGE, 'entry') def test_table_pollster_lookup_packets(self): self._test_pollster( table.TablePollsterLookupPackets, 'switch.table.lookup.packets', sample.TYPE_GAUGE, 'packet') def test_table_pollster_matched_packets(self): self._test_pollster( table.TablePollsterMatchedPackets, 'switch.table.matched.packets', sample.TYPE_GAUGE, 'packet') ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_switch.py0000664000567000056710000000170712701406223030056 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import switch from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestSwitchPollster(statistics._PollsterTestBase): def test_table_pollster(self): self._test_pollster( switch.SWPollster, 'switch', sample.TYPE_GAUGE, 'switch') ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_flow.py0000664000567000056710000000342412701406223027522 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
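# NOTE: The flow meters below mix sample types: flow counts and
# durations are gauges, while packet and byte totals are cumulative.
# A test for a new meter would follow the same shape; for example, a
# hypothetical FlowPollsterDrops (not part of this tree) would be
# covered by:
#
#     def test_flow_pollster_drops(self):
#         self._test_pollster(
#             flow.FlowPollsterDrops,
#             'switch.flow.drops',
#             sample.TYPE_CUMULATIVE,
#             'packet')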
from ceilometer.network.statistics import flow from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestFlowPollsters(statistics._PollsterTestBase): def test_flow_pollster(self): self._test_pollster( flow.FlowPollster, 'switch.flow', sample.TYPE_GAUGE, 'flow') def test_flow_pollster_duration_seconds(self): self._test_pollster( flow.FlowPollsterDurationSeconds, 'switch.flow.duration_seconds', sample.TYPE_GAUGE, 's') def test_flow_pollster_duration_nanoseconds(self): self._test_pollster( flow.FlowPollsterDurationNanoseconds, 'switch.flow.duration_nanoseconds', sample.TYPE_GAUGE, 'ns') def test_flow_pollster_packets(self): self._test_pollster( flow.FlowPollsterPackets, 'switch.flow.packets', sample.TYPE_CUMULATIVE, 'packet') def test_flow_pollster_bytes(self): self._test_pollster( flow.FlowPollsterBytes, 'switch.flow.bytes', sample.TYPE_CUMULATIVE, 'B') ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/test_port.py0000664000567000056710000000715112701406223027540 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import port from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestPortPollsters(statistics._PollsterTestBase): def test_port_pollster(self): self._test_pollster( port.PortPollster, 'switch.port', sample.TYPE_GAUGE, 'port') def test_port_pollster_receive_packets(self): self._test_pollster( port.PortPollsterReceivePackets, 'switch.port.receive.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_packets(self): self._test_pollster( port.PortPollsterTransmitPackets, 'switch.port.transmit.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_bytes(self): self._test_pollster( port.PortPollsterReceiveBytes, 'switch.port.receive.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_transmit_bytes(self): self._test_pollster( port.PortPollsterTransmitBytes, 'switch.port.transmit.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_receive_drops(self): self._test_pollster( port.PortPollsterReceiveDrops, 'switch.port.receive.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_drops(self): self._test_pollster( port.PortPollsterTransmitDrops, 'switch.port.transmit.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_errors(self): self._test_pollster( port.PortPollsterReceiveErrors, 'switch.port.receive.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_errors(self): self._test_pollster( port.PortPollsterTransmitErrors, 'switch.port.transmit.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_frame_errors(self): self._test_pollster( port.PortPollsterReceiveFrameErrors, 'switch.port.receive.frame_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_overrun_errors(self): self._test_pollster( port.PortPollsterReceiveOverrunErrors, 
'switch.port.receive.overrun_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_crc_errors(self): self._test_pollster( port.PortPollsterReceiveCRCErrors, 'switch.port.receive.crc_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_collision_count(self): self._test_pollster( port.PortPollsterCollisionCount, 'switch.port.collision.count', sample.TYPE_CUMULATIVE, 'packet') ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opendaylight/0000775000567000056710000000000012701406364027634 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py0000664000567000056710000020136312701406223032537 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import base import six from six import moves from six.moves.urllib import parse as url_parse from ceilometer.network.statistics.opendaylight import driver @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractproperty def flow_data(self): pass @abc.abstractproperty def port_data(self): pass @abc.abstractproperty def table_data(self): pass @abc.abstractproperty def topology_data(self): pass @abc.abstractproperty def switch_data(self): pass @abc.abstractproperty def user_links_data(self): pass @abc.abstractproperty def active_hosts_data(self): pass @abc.abstractproperty def inactive_hosts_data(self): pass fake_odl_url = url_parse.ParseResult('opendaylight', 'localhost:8080', 'controller/nb/v2', None, None, None) fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=default&auth=basic') fake_params_multi_container = ( url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=first&container_name=second&' 'auth=basic')) def setUp(self): super(_Base, self).setUp() self.addCleanup(mock.patch.stopall) self.driver = driver.OpenDayLightDriver() self.get_flow_statistics = mock.patch( 'ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', return_value=self.flow_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_table_statistics', return_value=self.table_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_port_statistics', return_value=self.port_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_topology', return_value=self.topology_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_user_links', return_value=self.user_links_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'SwitchManagerAPIClient.get_nodes', return_value=self.switch_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'HostTrackerAPIClient.get_active_hosts', return_value=self.active_hosts_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'HostTrackerAPIClient.get_inactive_hosts', return_value=self.inactive_hosts_data).start() def _test_for_meter(self, meter_name, expected_data): sample_data = self.driver.get_sample_data(meter_name, self.fake_odl_url, self.fake_params, {}) for sample, expected in moves.zip(sample_data, expected_data): self.assertEqual(expected[0], sample[0]) # check volume self.assertEqual(expected[1], sample[1]) # check resource id self.assertEqual(expected[2], sample[2]) # check resource metadata self.assertIsNotNone(sample[3]) # timestamp class TestOpenDayLightDriverSpecial(_Base): flow_data = {"flowStatistics": []} port_data = {"portStatistics": []} table_data = {"tableStatistics": []} topology_data = {"edgeProperties": []} switch_data = {"nodeProperties": []} user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_not_implemented_meter(self): sample_data = self.driver.get_sample_data('egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) sample_data = self.driver.get_sample_data('switch.table.egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) def test_cache(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(1, self.get_flow_statistics.call_count) cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(2, self.get_flow_statistics.call_count) def test_multi_container(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertEqual(2, self.get_flow_statistics.call_count) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('first', odl_data) self.assertIn('second', odl_data) def test_http_error(self): mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', side_effect=Exception()).start() sample_data = self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, {}) self.assertEqual(0, len(sample_data)) mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'StatisticsAPIClient.get_flow_statistics', side_effect=[Exception(), self.flow_data]).start() cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('second', odl_data) class TestOpenDayLightDriverSimple(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, ] } ] } topology_data = {"edgeProperties": []} switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, ] } user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_timeStamp_connectedSince": "1377291227877" }), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] 
self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.frame_error', expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", 
"flow_idleTimeout": "0", "flow_priority": "1" }), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.bytes', expected_data) class TestOpenDayLightDriverComplex(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", 
"priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.2" } ] }, "actions": { "@type": "output", "port": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "89", "durationNanoseconds": "200000", "durationSeconds": "5648", "packetCount": "30", "tableId": "1" } ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12740", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "182", "transmitBytes": "12110", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "173" }, { "nodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12180", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "174", "transmitBytes": "12670", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "181" }, { "nodeConnector": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" } ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, { "activeCount": "20", "lookupCount": "10", "matchedCount": "5", "nodeTable": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } } ] } ] } topology_data = { "edgeProperties": [ { "edge": { "headNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "properties": { "bandwidth": { "value": 10000000000 }, "config": { "value": 1 }, "name": { "value": "s2-eth3" }, "state": { "value": 1 }, 
"timeStamp": { "name": "creation", "value": 1379527162648 } } }, { "edge": { "headNodeConnector": { "id": "5", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:04", "type": "OF" }, "type": "OF" } }, "properties": { "timeStamp": { "name": "creation", "value": 1379527162648 } } } ] } switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "buffers": { "value": "256" }, "capabilities": { "value": "199" }, "description": { "value": "None" }, "macAddress": { "value": "00:00:00:00:00:02" }, "tables": { "value": "-1" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, { "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "properties": { "actions": { "value": "1024" }, "buffers": { "value": "512" }, "capabilities": { "value": "1000" }, "description": { "value": "Foo Bar" }, "macAddress": { "value": "00:00:00:00:00:03" }, "tables": { "value": "10" }, "timeStamp": { "name": "connectedSince", "value": "1377291228000" } } } ] } user_links_data = { "userLinks": [ { "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", "name": "link1", "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", "status": "Success" } ] } active_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:00:01:01", "networkAddress": "1.1.1.1", "nodeConnectorId": "9", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:00:02:02", "networkAddress": "2.2.2.2", "nodeConnectorId": "1", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "true", "vlan": "0" } ] } inactive_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:01:01:01", "networkAddress": "1.1.1.3", "nodeConnectorId": "8", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:01:02:02", "networkAddress": "2.2.2.4", "nodeConnectorId": "0", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "false", "vlan": "1" } ] } def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_buffers": "256", "properties_capabilities": "199", "properties_description": "None", "properties_macAddress": "00:00:00:00:00:02", "properties_tables": "-1", "properties_timeStamp_connectedSince": "1377291227877" }), (1, "00:00:00:00:00:00:00:03", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "1024", "properties_buffers": "512", "properties_capabilities": "1000", "properties_description": "Foo Bar", "properties_macAddress": "00:00:00:00:00:03", "properties_tables": "10", "properties_timeStamp_connectedSince": "1377291228000" }), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3', 'user_link_node_id': '00:00:00:00:00:00:00:05', 'user_link_node_port': '5', 'user_link_status': 'Success', 'user_link_name': 'link1', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 
'default', 'port': '2', 'topology_node_id': '00:00:00:00:00:00:00:03', 'topology_node_port': '2', "topology_bandwidth": 10000000000, "topology_config": 1, "topology_name": "s2-eth3", "topology_state": 1, "topology_timeStamp_creation": 1379527162648 }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1', 'host_status': 'active', 'host_dataLayerAddress': '00:00:00:00:02:02', 'host_networkAddress': '2.2.2.2', 'host_staticHost': 'true', 'host_vlan': '0', }), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0', 'host_status': 'inactive', 'host_dataLayerAddress': '00:00:00:01:02:02', 'host_networkAddress': '2.2.2.4', 'host_staticHost': 'false', 'host_vlan': '1', }), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (182, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (174, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (173, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (181, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (12740, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (12180, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (12110, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (12670, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 
'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.frame_error', expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 
'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (20, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (10, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}), (5, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }), (1, 
"00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (5648, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (200000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 
'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (30, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), (89, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}), ] self._test_for_meter('switch.flow.bytes', expected_data) ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py0000664000567000056710000000000012701406223031725 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py0000664000567000056710000001407112701406223032520 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
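"""Tests for ceilometer/network/statistics/opendaylight/client.py
"""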
import mock
from oslo_config import fixture as config_fixture
from oslotest import base
from requests import auth as req_auth
import six
from six.moves.urllib import parse as urlparse

from ceilometer.i18n import _
from ceilometer.network.statistics.opendaylight import client
from ceilometer import service as ceilometer_service


class TestClientHTTPBasicAuth(base.BaseTestCase):

    auth_way = 'basic'
    scheme = 'http'

    def setUp(self):
        super(TestClientHTTPBasicAuth, self).setUp()
        self.conf = self.useFixture(config_fixture.Config())
        ceilometer_service.prepare_service(argv=[], config_files=[])
        self.parsed_url = urlparse.urlparse(
            'http://127.0.0.1:8080/controller/nb/v2?container_name=default&'
            'container_name=egg&auth=%s&user=admin&password=admin_pass&'
            'scheme=%s' % (self.auth_way, self.scheme))
        self.params = urlparse.parse_qs(self.parsed_url.query)
        self.endpoint = urlparse.urlunparse(
            urlparse.ParseResult(self.scheme,
                                 self.parsed_url.netloc,
                                 self.parsed_url.path,
                                 None, None, None))
        odl_params = {'auth': self.params.get('auth')[0],
                      'user': self.params.get('user')[0],
                      'password': self.params.get('password')[0]}
        self.client = client.Client(self.endpoint, odl_params)

        self.resp = mock.MagicMock()
        self.get = mock.patch('requests.get',
                              return_value=self.resp).start()

        self.resp.raw.version = 1.1
        self.resp.status_code = 200
        self.resp.reason = 'OK'
        self.resp.headers = {}
        self.resp.content = 'dummy'

    def _test_request(self, method, url):
        data = method('default')

        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]

        # check url
        real_url = url % {'container_name': 'default',
                          'scheme': self.scheme}
        self.assertEqual(real_url, call_args[0])

        # check auth parameters
        auth = call_kwargs.get('auth')
        if self.auth_way == 'digest':
            self.assertIsInstance(auth, req_auth.HTTPDigestAuth)
        else:
            self.assertIsInstance(auth, req_auth.HTTPBasicAuth)
        self.assertEqual('admin', auth.username)
        self.assertEqual('admin_pass', auth.password)

        # check header
        self.assertEqual(
            {'Accept': 'application/json'},
            call_kwargs['headers'])

        # check return value
        self.assertEqual(self.get().json(), data)

    def test_flow_statistics(self):
        self._test_request(
            self.client.statistics.get_flow_statistics,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/statistics/%(container_name)s/flow')

    def test_port_statistics(self):
        self._test_request(
            self.client.statistics.get_port_statistics,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/statistics/%(container_name)s/port')

    def test_table_statistics(self):
        self._test_request(
            self.client.statistics.get_table_statistics,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/statistics/%(container_name)s/table')

    def test_topology(self):
        self._test_request(
            self.client.topology.get_topology,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/topology/%(container_name)s')

    def test_user_links(self):
        self._test_request(
            self.client.topology.get_user_links,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/topology/%(container_name)s/userLinks')

    def test_switch(self):
        self._test_request(
            self.client.switch_manager.get_nodes,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/switchmanager/%(container_name)s/nodes')

    def test_active_hosts(self):
        self._test_request(
            self.client.host_tracker.get_active_hosts,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/hosttracker/%(container_name)s/hosts/active')

    def test_inactive_hosts(self):
        self._test_request(
            self.client.host_tracker.get_inactive_hosts,
            '%(scheme)s://127.0.0.1:8080/controller/nb/v2'
            '/hosttracker/%(container_name)s/hosts/inactive')

    def test_http_error(self):
        self.resp.status_code = 404
        self.resp.reason = 'Not Found'

        try:
            self.client.statistics.get_flow_statistics('default')
            self.fail('OpenDaylightRESTAPIFailed not raised')
        except client.OpenDaylightRESTAPIFailed as e:
            # NOTE: 'OpenDaylitght' mirrors the misspelled message that the
            # client module actually raises.
            self.assertEqual(
                _('OpenDaylitght API returned %(status)s %(reason)s')
                % {'status': self.resp.status_code,
                   'reason': self.resp.reason},
                six.text_type(e))

    def test_other_error(self):

        class _Exception(Exception):
            pass

        self.get = mock.patch('requests.get',
                              side_effect=_Exception).start()

        self.assertRaises(_Exception,
                          self.client.statistics.get_flow_statistics,
                          'default')


class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth):

    auth_way = 'digest'


class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth):

    scheme = 'https'


class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth):

    scheme = 'https'
ceilometer-6.0.0/ceilometer/tests/unit/test_declarative.py0000664000567000056710000000315312701406223025152 0ustar jenkinsjenkins00000000000000#
# Copyright 2016 Mirantis, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import mockpatch

from ceilometer import declarative
from ceilometer.tests import base


class TestDefinition(base.BaseTestCase):

    def setUp(self):
        super(TestDefinition, self).setUp()
        self.configs = [
            "_field1",
            "_field2|_field3",
            {'fields': 'field4.`split(., 1, 1)`'},
            {'fields': ['field5.arg', 'field6'], 'type': 'text'}
        ]
        self.parser = mock.MagicMock()
        parser_patch = mockpatch.Patch(
            "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse",
            new=self.parser)
        self.useFixture(parser_patch)

    def test_caching_parsers(self):
        # Each definition is built twice, but the underlying JSONPath
        # parser should be invoked only once per distinct expression.
        for config in self.configs * 2:
            declarative.Definition("test", config, mock.MagicMock())
        self.assertEqual(4, self.parser.call_count)
        self.parser.assert_has_calls([
            mock.call("_field1"),
            mock.call("_field2|_field3"),
            mock.call("field4.`split(., 1, 1)`"),
            mock.call("(field5.arg)|(field6)"),
        ])
ceilometer-6.0.0/ceilometer/tests/unit/publisher/0000775000567000056710000000000012701406364023257 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/publisher/__init__.py0000664000567000056710000000000012701406223025350 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/publisher/test_utils.py0000664000567000056710000001245312701406223026027 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/utils.py """ from oslo_serialization import jsonutils from oslotest import base from ceilometer.publisher import utils class TestSignature(base.BaseTestCase): def test_compute_signature_change_key(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_change_value(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_same(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 sig2 = utils.compute_signature(data, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_use_configured_secret(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') sig2 = utils.compute_signature(data, 'different-value') self.assertNotEqual(sig1, sig2) def test_verify_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unsigned(self): data = {'a': 'A', 'b': 'B'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_incorrect(self): data = {'a': 'A', 'b': 'B', 'message_signature': 'Not the same'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_invalid_encoding(self): data = {'a': 'A', 'b': 'B', 'message_signature': ''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unicode(self): data = {'a': 'A', 'b': 'B', 'message_signature': u''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested_json(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'c': ('c',), 'd': ['d'] }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = jsonutils.loads(jsonutils.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_unicode_symbols(self): data = {u'a\xe9\u0437': 'A', 'b': u'B\xe9\u0437' } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = jsonutils.loads(jsonutils.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_besteffort_compare_digest(self): hash1 = "f5ac3fe42b80b80f979825d177191bc5" hash2 = "f5ac3fe42b80b80f979825d177191bc5" hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" hash4 = b"f5ac3fe42b80b80f979825d177191bc5" hash5 = b"f5ac3fe42b80b80f979825d177191bc5" hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) self.assertFalse(utils.besteffort_compare_digest(hash4, 
hash6)) def test_verify_no_secret(self): data = {'a': 'A', 'b': 'B'} self.assertTrue(utils.verify_signature(data, '')) ceilometer-6.0.0/ceilometer/tests/unit/publisher/test_file.py0000664000567000056710000001045312701406224025605 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/file.py """ import datetime import logging.handlers import os import tempfile from oslo_utils import netutils from oslotest import base from ceilometer.publisher import file from ceilometer import sample class TestFilePublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_file_publisher_maxbytes(self): # Test valid configurations tempdir = tempfile.mkdtemp() name = '%s/log_file' % tempdir parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' % name) publisher = file.FilePublisher(parsed_url) publisher.publish_samples(None, self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([50, name, 3], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # The rotating file gets created since only allow 50 bytes. 
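        # (RotatingFileHandler renames the live log to '<name>.1' on
        # rollover, so the first backup file should already exist after
        # publishing the three samples above.)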
        self.assertTrue(os.path.exists('%s.1' % name))

    def test_file_publisher(self):
        # Test missing max bytes, backup count configurations
        tempdir = tempfile.mkdtemp()
        name = '%s/log_file_plain' % tempdir
        parsed_url = netutils.urlsplit('file://%s' % name)
        publisher = file.FilePublisher(parsed_url)
        publisher.publish_samples(None, self.test_data)

        handler = publisher.publisher_logger.handlers[0]
        self.assertIsInstance(handler,
                              logging.handlers.RotatingFileHandler)
        self.assertEqual([0, name, 0],
                         [handler.maxBytes, handler.baseFilename,
                          handler.backupCount])
        # Test that the content is correctly saved in the file
        self.assertTrue(os.path.exists(name))
        with open(name, 'r') as f:
            content = f.read()
        for sample_item in self.test_data:
            self.assertIn(sample_item.id, content)
            self.assertIn(sample_item.timestamp, content)

    def test_file_publisher_invalid(self):
        # Test invalid max bytes, backup count configurations
        tempdir = tempfile.mkdtemp()
        parsed_url = netutils.urlsplit(
            'file://%s/log_file_bad'
            '?max_bytes=yus&backup_count=5y' % tempdir)
        publisher = file.FilePublisher(parsed_url)
        publisher.publish_samples(None, self.test_data)

        self.assertIsNone(publisher.publisher_logger)
ceilometer-6.0.0/ceilometer/tests/unit/publisher/test_udp.py0000664000567000056710000001315412701406224025457 0ustar jenkinsjenkins00000000000000#
# Copyright 2013-2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/udp.py """ import datetime import socket import mock import msgpack from oslo_config import fixture as fixture_config from oslo_utils import netutils from oslotest import base from ceilometer.publisher import udp from ceilometer.publisher import utils from ceilometer import sample COUNTER_SOURCE = 'testsource' class TestUDPPublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] @staticmethod def _make_fake_socket(published): def _fake_socket_socket(family, type): def record_data(msg, dest): published.append((msg, dest)) udp_socket = mock.Mock() udp_socket.sendto = record_data return udp_socket return _fake_socket_socket def setUp(self): super(TestUDPPublisher, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.publisher.telemetry_secret = 'not-so-secret' def _check_udp_socket(self, url, expected_addr_family): with mock.patch.object(socket, 'socket') as mock_socket: udp.UDPPublisher(netutils.urlsplit(url)) mock_socket.assert_called_with(expected_addr_family, socket.SOCK_DGRAM) def test_publisher_udp_socket_ipv4(self): self._check_udp_socket('udp://127.0.0.1:4952', socket.AF_INET) def test_publisher_udp_socket_ipv6(self): self._check_udp_socket('udp://[::1]:4952', socket.AF_INET6) def test_published(self): self.data_sent = [] with mock.patch('socket.socket', self._make_fake_socket(self.data_sent)): publisher = udp.UDPPublisher( netutils.urlsplit('udp://somehost')) publisher.publish_samples(None, self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data, dest in self.data_sent: counter = msgpack.loads(data, encoding="utf-8") sent_counters.append(counter) # Check destination self.assertEqual(('somehost', self.CONF.collector.udp_port), dest) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret") for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) @staticmethod def _raise_ioerror(*args): raise IOError def _make_broken_socket(self, family, type): udp_socket = mock.Mock() udp_socket.sendto = self._raise_ioerror return udp_socket def test_publish_error(self): with 
mock.patch('socket.socket', self._make_broken_socket): publisher = udp.UDPPublisher( netutils.urlsplit('udp://localhost')) publisher.publish_samples(None, self.test_data) ceilometer-6.0.0/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py0000664000567000056710000002124112701406224031361 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Cisco Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/kafka_broker.py """ import datetime import uuid import mock from oslo_utils import netutils from ceilometer.event.storage import models as event from ceilometer.publisher import kafka_broker as kafka from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base @mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) @mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', mock.Mock()) class TestKafkaPublisher(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_publish(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_without_options(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, 
len(publisher.local_queue)) def test_publish_to_host_without_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer')) self.assertEqual('default', publisher.policy) publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) self.assertEqual('default', publisher.policy) def test_publish_to_host_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = TypeError self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_samples, mock.MagicMock(), self.test_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_drop_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) def test_publish_to_host_with_queue_policy(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) self.assertEqual(1, len(publisher.local_queue)) def test_publish_to_down_host_with_default_queue_size(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 2000): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual('test-976', publisher.local_queue[0][2][0]['counter_name']) self.assertEqual('test-1999', publisher.local_queue[1023][2][0]['counter_name']) def test_publish_to_host_from_down_to_up_with_queue(self): publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) with mock.patch.object(publisher, '_producer') as fake_producer: fake_producer.send_messages.side_effect = Exception("test") for i in range(0, 16): for s in self.test_data: s.name = 'test-%d' % i publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(16, len(publisher.local_queue)) fake_producer.send_messages.side_effect = None for s in self.test_data: s.name = 'test-%d' % 16 publisher.publish_samples(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) def test_publish_event_with_default_policy(self): publisher = kafka.KafkaBrokerPublisher( netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) with mock.patch.object(publisher, '_producer') as fake_producer: publisher.publish_events(mock.MagicMock(), self.test_event_data) self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) with mock.patch.object(publisher, '_producer') as 
fake_producer: fake_producer.send_messages.side_effect = Exception("test") self.assertRaises(msg_publisher.DeliveryFailure, publisher.publish_events, mock.MagicMock(), self.test_event_data) self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) self.assertEqual(0, len(publisher.local_queue)) ceilometer-6.0.0/ceilometer/tests/unit/publisher/test_messaging_publisher.py0000664000567000056710000002705412701406224030725 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/messaging.py """ import datetime import uuid import mock from oslo_config import fixture as fixture_config from oslo_utils import netutils import testscenarios.testcase from ceilometer.event.storage import models as event from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer.tests import base as tests_base class BasePublisherTestCase(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_sample_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super(BasePublisherTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) class NotifierOnlyPublisherTest(BasePublisherTestCase): @mock.patch('oslo_messaging.Notifier') def test_publish_topic_override(self, notifier): msg_publisher.SampleNotifierPublisher( netutils.urlsplit('notifier://?topic=custom_topic')) notifier.assert_called_with(mock.ANY, topic='custom_topic', driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) msg_publisher.EventNotifierPublisher( netutils.urlsplit('notifier://?topic=custom_event_topic')) notifier.assert_called_with(mock.ANY, topic='custom_event_topic', 
driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) class TestPublisher(testscenarios.testcase.WithScenarios, BasePublisherTestCase): scenarios = [ ('notifier', dict(protocol="notifier", publisher_cls=msg_publisher.SampleNotifierPublisher, test_data=BasePublisherTestCase.test_sample_data, pub_func='publish_samples', attr='source')), ('event_notifier', dict(protocol="notifier", publisher_cls=msg_publisher.EventNotifierPublisher, test_data=BasePublisherTestCase.test_event_data, pub_func='publish_events', attr='event_type')), ] def setUp(self): super(TestPublisher, self).setUp() self.topic = (self.CONF.publisher_notifier.event_topic if self.pub_func == 'publish_events' else self.CONF.publisher_notifier.metering_topic) class TestPublisherPolicy(TestPublisher): @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_no_policy(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_block(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=default' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_incorrect(self, mylog): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=notexist' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), mock.MagicMock(), self.test_data) self.assertTrue(mylog.warning.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) class TestPublisherPolicyReactions(TestPublisher): def test_published_with_policy_drop_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=drop' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, 
self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.assert_called_once_with( mock.ANY, self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down_up(self): self.rpc_unreachable = True publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.side_effect = mock.MagicMock() getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(0, len(publisher.local_queue)) topic = self.topic expected = [mock.call(mock.ANY, topic, mock.ANY), mock.call(mock.ANY, topic, mock.ANY), mock.call(mock.ANY, topic, mock.ANY)] self.assertEqual(expected, fake_send.mock_calls) def test_published_with_policy_sized_queue_and_rpc_down(self): publisher = self.publisher_cls(netutils.urlsplit( '%s://?policy=queue&max_queue_length=3' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 5): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(3, len(publisher.local_queue)) self.assertEqual( 'test-2', publisher.local_queue[0][2][0][self.attr] ) self.assertEqual( 'test-3', publisher.local_queue[1][2][0][self.attr] ) self.assertEqual( 'test-4', publisher.local_queue[2][2][0][self.attr] ) def test_published_with_policy_default_sized_queue_and_rpc_down(self): publisher = self.publisher_cls( netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 2000): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(mock.MagicMock(), self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual( 'test-976', publisher.local_queue[0][2][0][self.attr] ) self.assertEqual( 'test-1999', publisher.local_queue[1023][2][0][self.attr] ) ceilometer-6.0.0/ceilometer/tests/unit/compute/0000775000567000056710000000000012701406364022736 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/notifications/0000775000567000056710000000000012701406364025607 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/notifications/test_instance.py0000664000567000056710000006541512701406223031031 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for converters for producing compute counter messages from notification events. 
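The INSTANCE_* dictionaries below are sample nova compute.instance.*
notification payloads used as fixtures by these tests.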
""" from oslotest import base from ceilometer.compute.notifications import instance from ceilometer import sample INSTANCE_CREATE_END = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'compute.instance.create.end', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } INSTANCE_DELETE_START = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:24:14.547374', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'compute.instance.delete.start', u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', u'payload': {u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47', u'memory_mb': 512, u'state': u'active', u'state_description': u'deleting', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:24:14.824743', } INSTANCE_EXISTS = { u'_context_auth_token': None, u'_context_is_admin': True, u'_context_project_id': None, u'_context_quota_class': None, u'_context_read_deleted': u'no', 
u'_context_remote_address': None, u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T16:03:43.760204', u'_context_user_id': None, u'event_type': u'compute.instance.exists', u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', u'audit_period_ending': u'2012-05-08 16:00:00', u'bandwidth': {}, u'created_at': u'2012-05-07 22:16:18', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-07 23:01:27', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 16:03:44.122481', } INSTANCE_EXISTS_METADATA_LIST = { u'_context_auth_token': None, u'_context_is_admin': True, u'_context_project_id': None, u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': None, u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T16:03:43.760204', u'_context_user_id': None, u'event_type': u'compute.instance.exists', u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', u'audit_period_ending': u'2012-05-08 16:00:00', u'bandwidth': {}, u'created_at': u'2012-05-07 22:16:18', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-07 23:01:27', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'metadata': [], u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', }, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 16:03:44.122481', } INSTANCE_FINISH_RESIZE_END = { u'_context_roles': [u'admin'], u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', u'_context_quota_class': None, u'event_type': u'compute.instance.finish_resize.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2013-01-04 15:10:17.436974', u'_context_is_admin': True, u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', u'_context_auth_token': None, u'_context_instance_lock_checked': False, u'_context_project_id': 
u'cea4b25edb484e5392727181b7721d29', u'_context_timestamp': u'2013-01-04T15:08:39.162612', u'_context_read_deleted': u'no', u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'_context_remote_address': u'10.147.132.184', u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', u'payload': {u'state_description': u'', u'availability_zone': None, u'ephemeral_gb': 0, u'instance_type_id': 5, u'deleted_at': u'', u'fixed_ips': [{u'floating_ips': [], u'label': u'private', u'version': 4, u'meta': {}, u'address': u'10.0.0.3', u'type': u'fixed'}], u'memory_mb': 2048, u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'reservation_id': u'r-u3fvim06', u'hostname': u's1', u'state': u'resized', u'launched_at': u'2013-01-04T15:10:14.923939', u'metadata': {u'metering.server_group': u'Group_A', u'AutoScalingGroupName': u'tyky-Group_Awste7', u'metering.foo.bar': u'true'}, u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'access_ip_v6': None, u'disk_gb': 20, u'access_ip_v4': None, u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'host': u'ip-10-147-132-184.ec2.internal', u'display_name': u's1', u'image_ref_url': u'http://10.147.132.184:9292/images/' 'a130b9d9-e00e-436e-9782-836ccef06e8a', u'root_gb': 20, u'tenant_id': u'cea4b25edb484e5392727181b7721d29', u'created_at': u'2013-01-04T11:21:48.000000', u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', u'instance_type': u'm1.small', u'vcpus': 1, u'image_meta': {u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'base_image_ref': u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, u'architecture': None, u'os_type': None }, u'priority': u'INFO' } INSTANCE_RESIZE_REVERT_END = { u'_context_roles': [u'admin'], u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', u'_context_quota_class': None, u'event_type': u'compute.instance.resize.revert.end', u'_context_user_name': u'admin', u'_context_project_name': u'admin', u'timestamp': u'2013-01-04 15:20:32.009532', u'_context_is_admin': True, u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', u'_context_auth_token': None, u'_context_instance_lock_checked': False, u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', u'_context_timestamp': u'2013-01-04T15:19:51.018218', u'_context_read_deleted': u'no', u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'_context_remote_address': u'10.147.132.184', u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', u'payload': {u'state_description': u'resize_reverting', u'availability_zone': None, u'ephemeral_gb': 0, u'instance_type_id': 2, u'deleted_at': u'', u'reservation_id': u'r-u3fvim06', u'memory_mb': 512, u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', u'hostname': u's1', u'state': u'resized', u'launched_at': u'2013-01-04T15:10:14.000000', u'metadata': {u'metering.server_group': u'Group_A', u'AutoScalingGroupName': u'tyky-Group_A-wste7', u'metering.foo.bar': u'true'}, u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'access_ip_v6': None, u'disk_gb': 0, u'access_ip_v4': None, u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'host': u'ip-10-147-132-184.ec2.internal', u'display_name': u's1', u'image_ref_url': u'http://10.147.132.184:9292/images/' 'a130b9d9-e00e-436e-9782-836ccef06e8a', u'root_gb': 0, u'tenant_id': u'cea4b25edb484e5392727181b7721d29', u'created_at': u'2013-01-04T11:21:48.000000', u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', u'instance_type': u'm1.tiny', u'vcpus': 1, u'image_meta': {u'kernel_id': 
u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', u'base_image_ref': u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, u'architecture': None, u'os_type': None }, u'priority': u'INFO' } INSTANCE_SCHEDULED = { u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', u'_context_quota_class': None, u'event_type': u'scheduler.run_instance.scheduled', u'_context_service_catalog': [{ u'endpoints': [{ u'adminURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', u'region': u'RegionOne', u'internalURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', u'publicURL': u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], u'endpoints_links': [], u'type': u'volume', u'name': u'cinder'}], u'_context_auth_token': u'TOK', u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', u'payload': { 'instance_id': 'fake-uuid1-1', u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, u'request_spec': { u'num_instances': 1, u'block_device_mapping': [{ u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'guest_format': None, u'boot_index': 0, u'no_device': None, u'connection_info': None, u'volume_id': None, u'volume_size': None, u'device_name': None, u'disk_bus': None, u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', u'source_type': u'image', u'device_type': u'disk', u'snapshot_id': None, u'destination_type': u'local', u'delete_on_termination': True}], u'image': { u'status': u'active', u'name': u'cirros-0.3.1-x86_64-uec', u'deleted': False, u'container_format': u'ami', u'created_at': u'2014-02-18T13:16:26.000000', u'disk_format': u'ami', u'updated_at': u'2014-02-18T13:16:27.000000', u'properties': { u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, u'min_disk': 0, u'min_ram': 0, u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', u'owner': u'2bd766a095b44486bf07cf7f666997eb', u'is_public': True, u'deleted_at': None, u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', u'size': 25165824}, u'instance_type': { u'root_gb': 1, u'name': u'm1.tiny', u'ephemeral_gb': 0, u'memory_mb': 512, u'vcpus': 1, u'extra_specs': {}, u'swap': 0, u'rxtx_factor': 1.0, u'flavorid': u'1', u'vcpu_weight': None, u'id': 2}, u'instance_properties': { u'vm_state': u'building', u'availability_zone': None, u'terminated_at': None, u'ephemeral_gb': 0, u'instance_type_id': 2, u'user_data': None, u'cleaned': False, u'vm_mode': None, u'deleted_at': None, u'reservation_id': u'r-ven5q6om', u'id': 15, u'security_groups': [{ u'deleted_at': None, u'user_id': u'0a757cd896b64b65ba3784afef564116', u'description': u'default', u'deleted': False, u'created_at': u'2014-02-19T11:02:31.000000', u'updated_at': None, u'project_id': u'2bd766a095b44486bf07cf7f666997eb', u'id': 1, u'name': u'default'}], u'disable_terminate': False, u'root_device_name': None, u'display_name': u'new', u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'default_swap_device': None, u'info_cache': { u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', u'deleted': False, u'created_at': u'2014-03-05T12:44:00.000000', u'updated_at': None, u'network_info': [], u'deleted_at': None}, u'hostname': u'new', u'launched_on': None, u'display_description': u'new', u'key_data': None, u'deleted': False, u'config_drive': u'', u'power_state': 0, u'default_ephemeral_device': None, u'progress': 0, u'project_id': u'2bd766a095b44486bf07cf7f666997eb', u'launched_at': None, 
u'scheduled_at': None,
            u'node': None,
            u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275',
            u'access_ip_v6': None,
            u'access_ip_v4': None,
            u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4',
            u'key_name': None,
            u'updated_at': None,
            u'host': None,
            u'root_gb': 1,
            u'user_id': u'0a757cd896b64b65ba3784afef564116',
            u'system_metadata': {
                u'image_kernel_id':
                u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4',
                u'image_min_disk': u'1',
                u'instance_type_memory_mb': u'512',
                u'instance_type_swap': u'0',
                u'instance_type_vcpu_weight': None,
                u'instance_type_root_gb': u'1',
                u'instance_type_name': u'm1.tiny',
                u'image_ramdisk_id':
                u'4999726c-545c-4a9e-bfc0-917459784275',
                u'instance_type_id': u'2',
                u'instance_type_ephemeral_gb': u'0',
                u'instance_type_rxtx_factor': u'1.0',
                u'instance_type_flavorid': u'1',
                u'instance_type_vcpus': u'1',
                u'image_container_format': u'ami',
                u'image_min_ram': u'0',
                u'image_disk_format': u'ami',
                u'image_base_image_ref':
                u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'},
            u'task_state': u'scheduling',
            u'shutdown_terminate': False,
            u'cell_name': None,
            u'ephemeral_key_uuid': None,
            u'locked': False,
            u'name': u'instance-0000000f',
            u'created_at': u'2014-03-05T12:44:00.000000',
            u'locked_by': None,
            u'launch_index': 0,
            u'memory_mb': 512,
            u'vcpus': 1,
            u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b',
            u'architecture': None,
            u'auto_disk_config': False,
            u'os_type': None,
            u'metadata': {u'metering.server_group': u'Group_A',
                          u'AutoScalingGroupName': u'tyky-Group_Awste7',
                          u'metering.foo.bar': u'true'}},
        u'security_group': [u'default'],
        u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}},
    u'priority': u'INFO',
    u'_context_is_admin': True,
    u'_context_timestamp': u'2014-03-05T12:44:00.135674',
    u'publisher_id': u'scheduler.eglynn-f19-devstack3',
    u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd',
    u'_context_remote_address': u'172.16.12.21',
    u'_context_roles': [u'_member_', u'admin'],
    u'timestamp': u'2014-03-05 12:44:00.733758',
    u'_context_user': u'0a757cd896b64b65ba3784afef564116',
    u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2',
    u'_context_project_name': u'admin',
    u'_context_read_deleted': u'no',
    u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb',
    u'_context_instance_lock_checked': False,
    u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb',
    u'_context_user_name': u'admin'
}


class TestNotifications(base.BaseTestCase):

    def test_process_notification(self):
        info = list(instance.Instance(None).process_notification(
            INSTANCE_CREATE_END
        ))[0]
        for name, actual, expected in [
                ('counter_name', info.name, 'instance'),
                ('counter_type', info.type, sample.TYPE_GAUGE),
                ('counter_volume', info.volume, 1),
                ('timestamp', info.timestamp,
                 INSTANCE_CREATE_END['timestamp']),
                ('resource_id', info.resource_id,
                 INSTANCE_CREATE_END['payload']['instance_id']),
                ('instance_type_id',
                 info.resource_metadata['instance_type_id'],
                 INSTANCE_CREATE_END['payload']['instance_type_id']),
                ('host', info.resource_metadata['host'],
                 INSTANCE_CREATE_END['publisher_id']),
        ]:
            self.assertEqual(expected, actual, name)

    @staticmethod
    def _find_counter(counters, name):
        # filter() returns an iterator on Python 3, so materialize it
        # before indexing.
        return list(filter(lambda counter: counter.name == name,
                           counters))[0]

    def _verify_user_metadata(self, metadata):
        self.assertIn('user_metadata', metadata)
        user_meta = metadata['user_metadata']
        self.assertEqual('Group_A', user_meta.get('server_group'))
        self.assertNotIn('AutoScalingGroupName', user_meta)
        self.assertIn('foo_bar', user_meta)
        self.assertNotIn('foo.bar', user_meta)

    def test_instance_create_instance(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_CREATE_END))
        self.assertEqual(1, len(counters))
        c = counters[0]
        self.assertEqual(1, c.volume)

    def test_instance_exists_instance(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_EXISTS))
        self.assertEqual(1, len(counters))

    def test_instance_exists_metadata_list(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST))
        self.assertEqual(1, len(counters))

    def test_instance_delete_instance(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_DELETE_START))
        self.assertEqual(1, len(counters))

    def test_instance_finish_resize_instance(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
        self.assertEqual(1, len(counters))
        c = counters[0]
        self.assertEqual(1, c.volume)
        self._verify_user_metadata(c.resource_metadata)

    def test_instance_resize_finish_instance(self):
        ic = instance.Instance(None)
        counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
        self.assertEqual(1, len(counters))
        c = counters[0]
        self.assertEqual(1, c.volume)
        self._verify_user_metadata(c.resource_metadata)

    def test_instance_scheduled(self):
        ic = instance.InstanceScheduled(None)

        self.assertIn(INSTANCE_SCHEDULED['event_type'],
                      ic.event_types)

        counters = list(ic.process_notification(INSTANCE_SCHEDULED))
        self.assertEqual(1, len(counters))
        names = [c.name for c in counters]
        self.assertEqual(['instance.scheduled'], names)
        rid = [c.resource_id for c in counters]
        self.assertEqual(['fake-uuid1-1'], rid)
ceilometer-6.0.0/ceilometer/tests/unit/compute/notifications/__init__.py0000664000567000056710000000000012701406223027700 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/0000775000567000056710000000000012701406364024765 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_diskio.py0000664000567000056710000003600312701406223027654 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 eNovance
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
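# Illustrative sketch (not part of the original test module): the disk
# pollsters exercised below emit samples in two shapes -- one aggregate
# sample per instance whose volume sums the per-device statistics, and one
# per-device sample whose resource id is "<instance-id>-<device>".  The
# helper below mimics that fan-out for read_bytes with namedtuple stand-ins
# for the virt inspector types; the names _Disk, _DiskStats and
# _fan_out_disk_samples are assumptions made for this sketch only.
import collections

_Disk = collections.namedtuple('_Disk', ['device'])
_DiskStats = collections.namedtuple('_DiskStats', ['read_bytes'])


def _fan_out_disk_samples(instance_id, disks):
    """Return (aggregate_volume, per_device_volumes) for read_bytes."""
    per_device = dict(('%s-%s' % (instance_id, d.device), stats.read_bytes)
                      for d, stats in disks)
    aggregate = sum(stats.read_bytes for _, stats in disks)
    return aggregate, per_device

# _fan_out_disk_samples(1, [(_Disk('vda1'), _DiskStats(1)),
#                           (_Disk('vda2'), _DiskStats(2))])
# returns (3, {'1-vda1': 1, '1-vda2': 2}), matching the volumes that
# TestDiskPollsters asserts for disk.read.bytes and disk.device.read.bytes.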
import mock
from oslotest import mockpatch

from ceilometer.agent import manager
from ceilometer.compute.pollsters import disk
from ceilometer.compute.virt import inspector as virt_inspector
import ceilometer.tests.base as base


class TestBaseDiskIO(base.BaseTestCase):

    TYPE = 'cumulative'

    def setUp(self):
        super(TestBaseDiskIO, self).setUp()

        self.inspector = mock.Mock()
        self.instance = self._get_fake_instances()
        patch_virt = mockpatch.Patch(
            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
            new=mock.Mock(return_value=self.inspector))
        self.useFixture(patch_virt)

        # The base compute pollster class lazily creates a hypervisor
        # inspector singleton, so that class property has to be mocked as
        # well to avoid sharing inspector state between the tests.
        patch_inspector = mockpatch.Patch(
            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
            self.inspector)
        self.useFixture(patch_inspector)

    @staticmethod
    def _get_fake_instances():
        instances = []
        for i in [1, 2]:
            instance = mock.MagicMock()
            instance.name = 'instance-%s' % i
            setattr(instance, 'OS-EXT-SRV-ATTR:instance_name',
                    instance.name)
            instance.id = i
            instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
                               'ram': 512, 'disk': 20, 'ephemeral': 0}
            instance.status = 'active'
            instances.append(instance)
        return instances

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def _check_get_samples(self, factory, name, expected_count=2):
        pollster = factory()

        mgr = manager.AgentManager()
        cache = {}
        samples = list(pollster.get_samples(mgr, cache, self.instance))
        self.assertIsNotEmpty(samples)
        cache_key = getattr(pollster, self.CACHE_KEY)
        self.assertIn(cache_key, cache)
        for instance in self.instance:
            self.assertIn(instance.id, cache[cache_key])
        self.assertEqual(set([name]), set([s.name for s in samples]))

        match = [s for s in samples if s.name == name]
        self.assertEqual(len(match), expected_count,
                         'missing counter %s' % name)
        return match

    def _check_aggregate_samples(self, factory, name,
                                 expected_volume,
                                 expected_device=None):
        match = self._check_get_samples(factory, name)
        self.assertEqual(expected_volume, match[0].volume)
        self.assertEqual(self.TYPE, match[0].type)
        if expected_device is not None:
            self.assertEqual(set(expected_device),
                             set(match[0].resource_metadata.get('device')))
        instances = [i.id for i in self.instance]
        for m in match:
            self.assertIn(m.resource_id, instances)

    def _check_per_device_samples(self, factory, name,
                                  expected_volume,
                                  expected_device=None):
        match = self._check_get_samples(factory, name, expected_count=4)
        match_dict = {}
        for m in match:
            match_dict[m.resource_id] = m
        for instance in self.instance:
            key = "%s-%s" % (instance.id, expected_device)
            self.assertEqual(expected_volume,
                             match_dict[key].volume)
            self.assertEqual(self.TYPE, match_dict[key].type)
            self.assertEqual(key, match_dict[key].resource_id)


class TestDiskPollsters(TestBaseDiskIO):

    DISKS = [
        (virt_inspector.Disk(device='vda1'),
         virt_inspector.DiskStats(read_bytes=1, read_requests=2,
                                  write_bytes=3, write_requests=4,
                                  errors=-1)),
        (virt_inspector.Disk(device='vda2'),
         virt_inspector.DiskStats(read_bytes=2, read_requests=3,
                                  write_bytes=5, write_requests=7,
                                  errors=-1)),
    ]
    CACHE_KEY = "CACHE_KEY_DISK"

    def setUp(self):
        super(TestDiskPollsters, self).setUp()
        self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS)

    def test_disk_read_requests(self):
        self._check_aggregate_samples(disk.ReadRequestsPollster,
                                      'disk.read.requests', 5,
                                      expected_device=['vda1', 'vda2'])

    def test_disk_read_bytes(self):
self._check_aggregate_samples(disk.ReadBytesPollster, 'disk.read.bytes', 3, expected_device=['vda1', 'vda2']) def test_disk_write_requests(self): self._check_aggregate_samples(disk.WriteRequestsPollster, 'disk.write.requests', 11, expected_device=['vda1', 'vda2']) def test_disk_write_bytes(self): self._check_aggregate_samples(disk.WriteBytesPollster, 'disk.write.bytes', 8, expected_device=['vda1', 'vda2']) def test_per_disk_read_requests(self): self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 3, 'vda2') def test_per_disk_write_requests(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 4, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 7, 'vda2') def test_per_disk_read_bytes(self): self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 1, 'vda1') self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 2, 'vda2') def test_per_disk_write_bytes(self): self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 5, 'vda2') class TestDiskRatePollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskRateStats(1024, 300, 5120, 700)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskRateStats(2048, 400, 6144, 800)) ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_RATE" def setUp(self): super(TestDiskRatePollsters, self).setUp() self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) def test_disk_read_bytes_rate(self): self._check_aggregate_samples(disk.ReadBytesRatePollster, 'disk.read.bytes.rate', 3072, expected_device=['disk1', 'disk2']) def test_disk_read_requests_rate(self): self._check_aggregate_samples(disk.ReadRequestsRatePollster, 'disk.read.requests.rate', 700, expected_device=['disk1', 'disk2']) def test_disk_write_bytes_rate(self): self._check_aggregate_samples(disk.WriteBytesRatePollster, 'disk.write.bytes.rate', 11264, expected_device=['disk1', 'disk2']) def test_disk_write_requests_rate(self): self._check_aggregate_samples(disk.WriteRequestsRatePollster, 'disk.write.requests.rate', 1500, expected_device=['disk1', 'disk2']) def test_per_disk_read_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 1024, 'disk1') self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 2048, 'disk2') def test_per_disk_read_requests_rate(self): self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 300, 'disk1') self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 400, 'disk2') def test_per_disk_write_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 5120, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 6144, 'disk2') def test_per_disk_write_requests_rate(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 'disk.device.write.requests.rate', 700, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 
'disk.device.write.requests.rate', 800, 'disk2') class TestDiskLatencyPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskLatencyStats(1000)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskLatencyStats(2000)) ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_LATENCY" def setUp(self): super(TestDiskLatencyPollsters, self).setUp() self.inspector.inspect_disk_latency = mock.Mock( return_value=self.DISKS) def test_disk_latency(self): self._check_aggregate_samples(disk.DiskLatencyPollster, 'disk.latency', 3) def test_per_device_latency(self): self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 1, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 2, 'disk2') class TestDiskIOPSPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='disk1'), virt_inspector.DiskIOPSStats(10)), (virt_inspector.Disk(device='disk2'), virt_inspector.DiskIOPSStats(20)), ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_IOPS" def setUp(self): super(TestDiskIOPSPollsters, self).setUp() self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) def test_disk_iops(self): self._check_aggregate_samples(disk.DiskIOPSPollster, 'disk.iops', 30) def test_per_device_iops(self): self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 10, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 20, 'disk2') class TestDiskInfoPollsters(TestBaseDiskIO): DISKS = [ (virt_inspector.Disk(device='vda1'), virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)), (virt_inspector.Disk(device='vda2'), virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)), ] TYPE = 'gauge' CACHE_KEY = "CACHE_KEY_DISK_INFO" def setUp(self): super(TestDiskInfoPollsters, self).setUp() self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) def test_disk_capacity(self): self._check_aggregate_samples(disk.CapacityPollster, 'disk.capacity', 7, expected_device=['vda1', 'vda2']) def test_disk_allocation(self): self._check_aggregate_samples(disk.AllocationPollster, 'disk.allocation', 5, expected_device=['vda1', 'vda2']) def test_disk_physical(self): self._check_aggregate_samples(disk.PhysicalPollster, 'disk.usage', 3, expected_device=['vda1', 'vda2']) def test_per_disk_capacity(self): self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 4, 'vda2') def test_per_disk_allocation(self): self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 3, 'vda2') def test_per_disk_physical(self): self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 1, 'vda1') self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 2, 'vda2') ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py0000664000567000056710000001173512701406223032047 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the compute pollsters.
"""

import mock
from oslotest import base
import six

from ceilometer.agent import manager
from ceilometer.compute.pollsters import util


class FauxInstance(object):

    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def get(self, key, default):
        try:
            return getattr(self, key)
        except AttributeError:
            return default


class TestLocationMetadata(base.BaseTestCase):

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        self.manager = manager.AgentManager()
        super(TestLocationMetadata, self).setUp()

        # Mimics an instance returned from nova api call
        self.INSTANCE_PROPERTIES = {'name': 'display name',
                                    'id': ('234cbe81-4e09-4f64-9b2a-'
                                           '714f6b9046e3'),
                                    'OS-EXT-SRV-ATTR:instance_name':
                                    'instance-000001',
                                    'OS-EXT-AZ:availability_zone':
                                    'foo-zone',
                                    'reservation_id': 'reservation id',
                                    'architecture': 'x86_64',
                                    'kernel_id': 'kernel id',
                                    'os_type': 'linux',
                                    'ramdisk_id': 'ramdisk id',
                                    'status': 'active',
                                    'ephemeral_gb': 0,
                                    'root_gb': 20,
                                    'disk_gb': 20,
                                    'image': {'id': 1,
                                              'links': [{"rel": "bookmark",
                                                         'href': 2}]},
                                    'hostId': '1234-5678',
                                    'OS-EXT-SRV-ATTR:host': 'host-test',
                                    'flavor': {'name': 'm1.tiny',
                                               'id': 1,
                                               'disk': 20,
                                               'ram': 512,
                                               'vcpus': 2,
                                               'ephemeral': 0},
                                    'metadata': {'metering.autoscale.group':
                                                 'X' * 512,
                                                 'metering.ephemeral_gb': 42}}
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)

    def test_metadata(self):
        md = util._get_metadata_from_object(self.instance)
        for prop, value in six.iteritems(self.INSTANCE_PROPERTIES):
            if prop not in ("metadata",):
                # Special cases
                if prop == 'name':
                    prop = 'display_name'
                elif prop == 'hostId':
                    prop = "host"
                elif prop == 'OS-EXT-SRV-ATTR:host':
                    prop = "instance_host"
                elif prop == 'OS-EXT-SRV-ATTR:instance_name':
                    prop = 'name'
                elif prop == "id":
                    prop = "instance_id"
                self.assertEqual(value, md[prop])
        user_metadata = md['user_metadata']
        expected = self.INSTANCE_PROPERTIES[
            'metadata']['metering.autoscale.group'][:256]
        self.assertEqual(expected, user_metadata['autoscale_group'])
        self.assertEqual(1, len(user_metadata))

    def test_metadata_empty_image(self):
        self.INSTANCE_PROPERTIES['image'] = None
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
        md = util._get_metadata_from_object(self.instance)
        self.assertIsNone(md['image'])
        self.assertIsNone(md['image_ref'])
        self.assertIsNone(md['image_ref_url'])

    def test_metadata_image_through_conductor(self):
        # There should be no links here, should default to None
        self.INSTANCE_PROPERTIES['image'] = {'id': 1}
        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
        md = util._get_metadata_from_object(self.instance)
        self.assertEqual(1, md['image_ref'])
        self.assertIsNone(md['image_ref_url'])
ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_net.py0000664000567000056710000003157712701406223027173 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 eNovance
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import net from ceilometer.compute.virt import inspector as virt_inspector from ceilometer import sample from ceilometer.tests.unit.compute.pollsters import base class FauxInstance(object): def __init__(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): return getattr(self, key, default) class TestNetPollster(base.TestPollsterBase): def setUp(self): super(TestNetPollster, self).setUp() self.vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, tx_bytes=3, tx_packets=4) self.vnic1 = virt_inspector.Interface( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2')) stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6, tx_bytes=7, tx_packets=8) self.vnic2 = virt_inspector.Interface( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3')) stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10, tx_bytes=11, tx_packets=12) vnics = [ (self.vnic0, stats0), (self.vnic1, stats1), (self.vnic2, stats2), ] self.inspector.inspect_vnics = mock.Mock(return_value=vnics) self.INSTANCE_PROPERTIES = {'name': 'display name', 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'id': 'instance id', 'user_id': 'user id', 'tenant_id': 'tenant id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'disk': 20, 'ram': 512, 'name': 'tiny', 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.foobar': 42}} self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager() pollster = factory() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(len(match), 1, 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('cumulative', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def 
test_incoming_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesPollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 5, self.vnic1.fref), ('192.168.0.4', 9, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesPollster, [('10.0.0.2', 3, self.vnic0.fref), ('192.168.0.3', 7, self.vnic1.fref), ('192.168.0.4', 11, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_incoming_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingPacketsPollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 6, self.vnic1.fref), ('192.168.0.4', 10, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingPacketsPollster, [('10.0.0.2', 4, self.vnic0.fref), ('192.168.0.3', 8, self.vnic1.fref), ('192.168.0.4', 12, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_metadata(self): factory = net.OutgoingBytesPollster pollster = factory() sm = pollster.make_vnic_sample(self.faux_instance, name='network.outgoing.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=100, vnic_data=self.vnic0) user_metadata = sm.resource_metadata['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, user_metadata['autoscale_group']) self.assertEqual(2, len(user_metadata)) class TestNetPollsterCache(base.TestPollsterBase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples_cache(self, factory): vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, tx_bytes=3, tx_packets=4) vnics = [(vnic0, stats0)] mgr = manager.AgentManager() pollster = factory() cache = { pollster.CACHE_KEY_VNIC: { self.instance.id: vnics, }, } samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) def test_incoming_bytes(self): self._check_get_samples_cache(net.IncomingBytesPollster) def test_outgoing_bytes(self): self._check_get_samples_cache(net.OutgoingBytesPollster) def test_incoming_packets(self): self._check_get_samples_cache(net.IncomingPacketsPollster) def test_outgoing_packets(self): self._check_get_samples_cache(net.OutgoingPacketsPollster) class TestNetRatesPollster(base.TestPollsterBase): def setUp(self): super(TestNetRatesPollster, self).setUp() self.vnic0 = virt_inspector.Interface( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1')) stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1, tx_bytes_rate=2) self.vnic1 = virt_inspector.Interface( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2')) stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3, tx_bytes_rate=4) self.vnic2 = virt_inspector.Interface( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', 
parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3')) stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5, tx_bytes_rate=6) vnics = [ (self.vnic0, stats0), (self.vnic1, stats1), (self.vnic2, stats2), ] self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager() pollster = factory() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(1, len(match), 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('gauge', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def test_incoming_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesRatePollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 3, self.vnic1.fref), ('192.168.0.4', 5, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesRatePollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 4, self.vnic1.fref), ('192.168.0.4', 6, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_memory.py0000664000567000056710000001101712701406223027700 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
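# Illustrative sketch (not part of the original test module): the tests
# below pin down the memory pollsters' error handling -- a successful
# inspection yields exactly one sample, a NoDataException is logged as a
# warning and yields nothing, and an InstanceShutOffException is skipped
# without any warning.  The exception names mirror the ones in
# ceilometer.compute.virt.inspector; the helper itself and its logger
# wiring are assumptions made for this sketch only.
import logging

_LOG = logging.getLogger(__name__)


class _NoDataError(Exception):
    """Stand-in for virt_inspector.NoDataException."""


class _ShutOffError(Exception):
    """Stand-in for virt_inspector.InstanceShutOffException."""


def _poll_memory_usage(inspect_memory_usage, instance):
    try:
        usage = inspect_memory_usage(instance, None)
    except _ShutOffError:
        return []          # instance is off: no sample, no warning
    except _NoDataError:
        _LOG.warning('no memory usage data for %s', instance)
        return []          # no data: warn once, emit nothing
    return [usage]         # normal case: one memory.usage sample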
import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import memory from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.tests.unit.compute.pollsters import base class TestMemoryPollster(base.TestPollsterBase): def setUp(self): super(TestMemoryPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.MemoryUsageStats(usage=1.0), virt_inspector.MemoryUsageStats(usage=2.0), virt_inspector.NoDataException(), virt_inspector.InstanceShutOffException(), )) def inspect_memory_usage(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.MemoryUsageStats): return value else: raise value self.inspector.inspect_memory_usage = mock.Mock( side_effect=inspect_memory_usage) mgr = manager.AgentManager() pollster = memory.MemoryUsagePollster() @mock.patch('ceilometer.compute.pollsters.memory.LOG') def _verify_memory_metering(expected_count, expected_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.usage']), set([s.name for s in samples])) self.assertEqual(expected_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_memory_metering(1, 1.0, 0) _verify_memory_metering(1, 2.0, 0) _verify_memory_metering(0, 0, 1) _verify_memory_metering(0, 0, 0) class TestResidentMemoryPollster(base.TestPollsterBase): def setUp(self): super(TestResidentMemoryPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.MemoryResidentStats(resident=1.0), virt_inspector.MemoryResidentStats(resident=2.0), virt_inspector.NoDataException(), virt_inspector.InstanceShutOffException(), )) def inspect_memory_resident(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.MemoryResidentStats): return value else: raise value self.inspector.inspect_memory_resident = mock.Mock( side_effect=inspect_memory_resident) mgr = manager.AgentManager() pollster = memory.MemoryResidentPollster() @mock.patch('ceilometer.compute.pollsters.memory.LOG') def _verify_resident_memory_metering(expected_count, expected_resident_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.resident']), set([s.name for s in samples])) self.assertEqual(expected_resident_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_resident_memory_metering(1, 1.0, 0) _verify_resident_memory_metering(1, 2.0, 0) _verify_resident_memory_metering(0, 0, 1) _verify_resident_memory_metering(0, 0, 0) ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_instance.py0000664000567000056710000000724012701406223030177 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as fixture_config from ceilometer.agent import manager from ceilometer.compute.pollsters import instance as pollsters_instance from ceilometer.tests.unit.compute.pollsters import base class TestInstancePollster(base.TestPollsterBase): def setUp(self): super(TestInstancePollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples_instance(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('instance', samples[0].name) self.assertEqual(1, samples[0].resource_metadata['vcpus']) self.assertEqual(512, samples[0].resource_metadata['memory_mb']) self.assertEqual(20, samples[0].resource_metadata['disk_gb']) self.assertEqual(20, samples[0].resource_metadata['root_gb']) self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) self.assertEqual('active', samples[0].resource_metadata['status']) self.assertEqual('active', samples[0].resource_metadata['state']) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_reserved_metadata_with_keys(self): self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('reserved_metadata_keys', ['fqdn']) mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'fqdn': 'vm_fqdn', 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_reserved_metadata_with_namespace(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('reserved_metadata_namespace', []) mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertNotIn('user_metadata', samples[0].resource_metadata) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_flavor_name_as_metadata_instance_type(self): mgr = manager.AgentManager() pollster = pollsters_instance.InstancePollster() samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('m1.small', samples[0].resource_metadata['instance_type']) ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/__init__.py0000664000567000056710000000000012701406223027056 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/base.py0000664000567000056710000000415312701406223026246 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import mockpatch import ceilometer.tests.base as base class TestPollsterBase(base.BaseTestCase): def setUp(self): super(TestPollsterBase, self).setUp() self.inspector = mock.Mock() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} patch_virt = mockpatch.Patch( 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', new=mock.Mock(return_value=self.inspector)) self.useFixture(patch_virt) # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests patch_inspector = mockpatch.Patch( 'ceilometer.compute.pollsters.BaseComputePollster.inspector', self.inspector) self.useFixture(patch_inspector) ceilometer-6.0.0/ceilometer/tests/unit/compute/pollsters/test_cpu.py0000664000567000056710000000750712701406223027170 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
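# Illustrative sketch (not part of the original test module): the `cpu`
# meter checked below is a cumulative counter of CPU nanoseconds, so its
# value can legitimately decrease when an instance restarts (the third
# CPUStats value in TestCPUPollster), while `cpu_util` is a gauge in
# percent.  The helper shows one generic way a utilisation percentage can
# be derived from two cumulative readings; it is an assumption for this
# sketch, not the pollsters' own code path.


def _cpu_util_from_samples(cpu_ns_old, cpu_ns_new, wall_clock_ns, n_cpus):
    if cpu_ns_new < cpu_ns_old:
        cpu_ns_old = 0  # counter reset, e.g. the instance was restarted
    return 100.0 * (cpu_ns_new - cpu_ns_old) / float(wall_clock_ns * n_cpus)

# _cpu_util_from_samples(1 * 10 ** 6, 3 * 10 ** 6, 10 ** 7, 2) -> 10.0,
# i.e. 2 ms of CPU time over 10 ms of wall time on 2 vCPUs is 10 percent.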
import time import mock from ceilometer.agent import manager from ceilometer.compute.pollsters import cpu from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.tests.unit.compute.pollsters import base class TestCPUPollster(base.TestPollsterBase): def setUp(self): super(TestCPUPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.CPUStats(time=1 * (10 ** 6), number=2), virt_inspector.CPUStats(time=3 * (10 ** 6), number=2), # cpu_time resets on instance restart virt_inspector.CPUStats(time=2 * (10 ** 6), number=2), )) def inspect_cpus(name): return next(next_value) self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus) mgr = manager.AgentManager() pollster = cpu.CPUPollster() def _verify_cpu_metering(expected_time): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu']), set([s.name for s in samples])) self.assertEqual(expected_time, samples[0].volume) self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) # ensure elapsed time between polling cycles is non-zero time.sleep(0.001) _verify_cpu_metering(1 * (10 ** 6)) _verify_cpu_metering(3 * (10 ** 6)) _verify_cpu_metering(2 * (10 ** 6)) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples_no_caching(self): cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2) self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats) mgr = manager.AgentManager() pollster = cpu.CPUPollster() cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(10 ** 6, samples[0].volume) self.assertEqual(0, len(cache)) class TestCPUUtilPollster(base.TestPollsterBase): def setUp(self): super(TestCPUUtilPollster, self).setUp() @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def test_get_samples(self): next_value = iter(( virt_inspector.CPUUtilStats(util=40), virt_inspector.CPUUtilStats(util=60), )) def inspect_cpu_util(name, duration): return next(next_value) self.inspector.inspect_cpu_util = (mock. Mock(side_effect=inspect_cpu_util)) mgr = manager.AgentManager() pollster = cpu.CPUUtilPollster() def _verify_cpu_util_metering(expected_util): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu_util']), set([s.name for s in samples])) self.assertEqual(expected_util, samples[0].volume) _verify_cpu_util_metering(40) _verify_cpu_util_metering(60) ceilometer-6.0.0/ceilometer/tests/unit/compute/__init__.py0000664000567000056710000000000012701406223025027 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/test_discovery.py0000664000567000056710000001001112701406223026341 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
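# Illustrative sketch (not part of the original test module):
# test_discovery_with_resource_update_interval below relies on the
# discovery throttling its nova API calls -- within the configured
# interval it returns no resources, and once the interval has elapsed it
# asks only for instances changed since the previous full run.  A minimal
# version of that gate, with hypothetical names, looks like this:
import datetime


class _ThrottledDiscovery(object):

    def __init__(self, list_instances, interval_seconds, utcnow):
        self._list_instances = list_instances   # e.g. a nova client call
        self._interval = datetime.timedelta(seconds=interval_seconds)
        self._utcnow = utcnow
        self.last_run = None

    def discover(self):
        now = self._utcnow()
        if self.last_run and now - self.last_run < self._interval:
            return []                            # too soon: skip the API
        since = self.last_run.isoformat() if self.last_run else None
        self.last_run = now
        return self._list_instances(since)       # incremental query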
import datetime import iso8601 import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch from ceilometer.compute import discovery import ceilometer.tests.base as base class TestDiscovery(base.BaseTestCase): def setUp(self): super(TestDiscovery, self).setUp() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.client = mock.MagicMock() self.client.instance_get_all_by_host.return_value = [self.instance] patch_client = mockpatch.Patch('ceilometer.nova_client.Client', return_value=self.client) self.useFixture(patch_client) self.utc_now = mock.MagicMock( return_value=datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC)) patch_timeutils = mockpatch.Patch('oslo_utils.timeutils.utcnow', self.utc_now) self.useFixture(patch_timeutils) self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('host', 'test') def test_normal_discovery(self): dsc = discovery.InstanceDiscovery() resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( 'test', None) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_with( self.CONF.host, "2016-01-01T00:00:00+00:00") def test_discovery_with_resource_update_interval(self): self.CONF.set_override("resource_update_interval", 600, group="compute") dsc = discovery.InstanceDiscovery() dsc.last_run = datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=5, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) self.client.instance_get_all_by_host.assert_not_called() self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( self.CONF.host, "2016-01-01T00:00:00+00:00") ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/0000775000567000056710000000000012701406364023722 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/__init__.py0000664000567000056710000000000012701406223026013 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/0000775000567000056710000000000012701406364025375 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/__init__.py0000664000567000056710000000000012701406223027466 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py0000664000567000056710000004320612701406223031013 0ustar 
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/0000775000567000056710000000000012701406364023722 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/__init__.py0000664000567000056710000000000012701406223026013 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/0000775000567000056710000000000012701406364025375 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/__init__.py0000664000567000056710000000000012701406223027466 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py0000664000567000056710000004320612701406223031013 0ustar jenkinsjenkins00000000000000
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for libvirt inspector."""

try:
    import contextlib2 as contextlib   # for Python < 3.3
except ImportError:
    import contextlib

import fixtures
import mock
from oslo_utils import units
from oslotest import base

from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector


class TestLibvirtInspection(base.BaseTestCase):

    class fakeLibvirtError(Exception):
        pass

    def setUp(self):
        super(TestLibvirtInspection, self).setUp()

        class VMInstance(object):
            id = 'ff58e738-12f4-4c58-acde-77617b68da56'
            name = 'instance-00000001'
        self.instance = VMInstance
        self.inspector = libvirt_inspector.LibvirtInspector()
        self.inspector.connection = mock.Mock()
        libvirt_inspector.libvirt = mock.Mock()
        libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5
        libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError
        self.domain = mock.Mock()
        self.addCleanup(mock.patch.stopall)

    def test_inspect_cpus(self):
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(self.inspector.connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(0, 0, 0,
                                                                2, 999999)))
            cpu_info = self.inspector.inspect_cpus(self.instance)
            self.assertEqual(2, cpu_info.number)
            self.assertEqual(999999, cpu_info.time)

    def test_inspect_cpus_with_domain_shutoff(self):
        connection = self.inspector.connection
        with mock.patch.object(connection, 'lookupByUUIDString',
                               return_value=self.domain):
            with mock.patch.object(self.domain, 'info',
                                   return_value=(5, 0, 0,
                                                 2, 999999)):
                self.assertRaises(virt_inspector.InstanceShutOffException,
                                  self.inspector.inspect_cpus,
                                  self.instance)

    def test_inspect_vnics(self):
        # Minimal domain XML reconstructed to be consistent with the
        # assertions below: three bridged vnets, two carrying a nova
        # filterref with DHCPSERVER/IP/PROJMASK/PROJNET parameters.
        dom_xml = """
             <domain type='kvm'>
                 <devices>
                    <interface type='bridge'>
                       <mac address='fa:16:3e:71:ec:6d'/>
                       <target dev='vnet0'/>
                       <filterref filter=
                        'nova-instance-00000001-fa163e71ec6d'>
                         <parameter name='DHCPSERVER' value='10.0.0.1'/>
                         <parameter name='IP' value='10.0.0.2'/>
                         <parameter name='PROJMASK' value='255.255.255.0'/>
                         <parameter name='PROJNET' value='10.0.0.0'/>
                       </filterref>
                    </interface>
                    <interface type='bridge'>
                       <mac address='fa:16:3e:71:ec:6e'/>
                       <target dev='vnet1'/>
                       <filterref filter=
                        'nova-instance-00000001-fa163e71ec6e'>
                         <parameter name='DHCPSERVER' value='192.168.0.1'/>
                         <parameter name='IP' value='192.168.0.2'/>
                         <parameter name='PROJMASK' value='255.255.255.0'/>
                         <parameter name='PROJNET' value='192.168.0.0'/>
                       </filterref>
                    </interface>
                    <interface type='bridge'>
                       <mac address='fa:16:3e:96:33:f0'/>
                       <target dev='vnet2'/>
                    </interface>
                 </devices>
             </domain>
        """

        interface_stats = {
            'vnet0': (1, 2, 0, 0, 3, 4, 0, 0),
            'vnet1': (5, 6, 0, 0, 7, 8, 0, 0),
            'vnet2': (9, 10, 0, 0, 11, 12, 0, 0),
        }
        interfaceStats = interface_stats.__getitem__

        connection = self.inspector.connection
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
                                                  return_value=dom_xml))
            stack.enter_context(mock.patch.object(self.domain,
                                                  'interfaceStats',
                                                  side_effect=interfaceStats))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(0, 0, 0,
                                                                2, 999999)))
            interfaces = list(self.inspector.inspect_vnics(self.instance))

            self.assertEqual(3, len(interfaces))

            vnic0, info0 = interfaces[0]
            self.assertEqual('vnet0', vnic0.name)
            self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac)
            self.assertEqual('nova-instance-00000001-fa163e71ec6d',
                             vnic0.fref)
            self.assertEqual('255.255.255.0',
                             vnic0.parameters.get('projmask'))
            self.assertEqual('10.0.0.2', vnic0.parameters.get('ip'))
            self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet'))
            self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver'))
            self.assertEqual(1, info0.rx_bytes)
            self.assertEqual(2, info0.rx_packets)
            self.assertEqual(3, info0.tx_bytes)
            self.assertEqual(4, info0.tx_packets)

            vnic1, info1 = interfaces[1]
            self.assertEqual('vnet1', vnic1.name)
            self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac)
            self.assertEqual('nova-instance-00000001-fa163e71ec6e',
                             vnic1.fref)
            self.assertEqual('255.255.255.0',
                             vnic1.parameters.get('projmask'))
            self.assertEqual('192.168.0.2', vnic1.parameters.get('ip'))
            self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet'))
            self.assertEqual('192.168.0.1',
                             vnic1.parameters.get('dhcpserver'))
            self.assertEqual(5, info1.rx_bytes)
            self.assertEqual(6, info1.rx_packets)
            self.assertEqual(7, info1.tx_bytes)
            self.assertEqual(8, info1.tx_packets)

            vnic2, info2 = interfaces[2]
            self.assertEqual('vnet2', vnic2.name)
            self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac)
            self.assertIsNone(vnic2.fref)
            self.assertEqual(dict(), vnic2.parameters)
            self.assertEqual(9, info2.rx_bytes)
            self.assertEqual(10, info2.rx_packets)
            self.assertEqual(11, info2.tx_bytes)
            self.assertEqual(12, info2.tx_packets)

    def test_inspect_vnics_with_domain_shutoff(self):
        connection = self.inspector.connection
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(5, 0, 0,
                                                                2, 999999)))
            inspect = self.inspector.inspect_vnics
            self.assertRaises(virt_inspector.InstanceShutOffException,
                              list, inspect(self.instance))

    def test_inspect_disks(self):
        # Minimal domain XML matching the single-disk assertions below.
        dom_xml = """
             <domain type='kvm'>
                 <devices>
                     <disk type='file' device='disk'>
                         <driver name='qemu' type='qcow2' cache='none'/>
                         <source file='/path/instance-00000001/disk'/>
                         <target dev='vda' bus='virtio'/>
                     </disk>
                 </devices>
             </domain>
        """

        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(self.inspector.connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
                                                  return_value=dom_xml))
            stack.enter_context(mock.patch.object(self.domain, 'blockStats',
                                                  return_value=(1, 2, 3,
                                                                4, -1)))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(0, 0, 0,
                                                                2, 999999)))
            disks = list(self.inspector.inspect_disks(self.instance))

            self.assertEqual(1, len(disks))
            disk0, info0 = disks[0]
            self.assertEqual('vda', disk0.device)
            self.assertEqual(1, info0.read_requests)
            self.assertEqual(2, info0.read_bytes)
            self.assertEqual(3, info0.write_requests)
            self.assertEqual(4, info0.write_bytes)

    def test_inspect_disks_with_domain_shutoff(self):
        connection = self.inspector.connection
        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(5, 0, 0,
                                                                2, 999999)))
            inspect = self.inspector.inspect_disks
            self.assertRaises(virt_inspector.InstanceShutOffException,
                              list, inspect(self.instance))

    def test_inspect_memory_usage(self):
        fake_memory_stats = {'available': 51200, 'unused': 25600}
        connection = self.inspector.connection
        with mock.patch.object(connection, 'lookupByUUIDString',
                               return_value=self.domain):
            with mock.patch.object(self.domain, 'info',
                                   return_value=(0, 0, 51200,
                                                 2, 999999)):
                with mock.patch.object(self.domain, 'memoryStats',
                                       return_value=fake_memory_stats):
                    memory = self.inspector.inspect_memory_usage(
                        self.instance)
                    self.assertEqual(25600 / units.Ki, memory.usage)

    def test_inspect_disk_info(self):
        # Minimal domain XML matching the single-disk assertions below.
        dom_xml = """
             <domain type='kvm'>
                 <devices>
                     <disk type='file' device='disk'>
                         <driver name='qemu' type='qcow2' cache='none'/>
                         <source file='/path/instance-00000001/disk'/>
                         <target dev='vda' bus='virtio'/>
                     </disk>
                 </devices>
             </domain>
        """

        with contextlib.ExitStack() as stack:
            stack.enter_context(mock.patch.object(self.inspector.connection,
                                                  'lookupByUUIDString',
                                                  return_value=self.domain))
            stack.enter_context(mock.patch.object(self.domain, 'XMLDesc',
                                                  return_value=dom_xml))
            stack.enter_context(mock.patch.object(self.domain, 'blockInfo',
                                                  return_value=(1, 2, 3,
                                                                -1)))
            stack.enter_context(mock.patch.object(self.domain, 'info',
                                                  return_value=(0, 0, 0,
                                                                2, 999999)))
            disks = list(self.inspector.inspect_disk_info(self.instance))

            self.assertEqual(1, len(disks))
            disk0, info0 = disks[0]
            self.assertEqual('vda', disk0.device)
            self.assertEqual(1, info0.capacity)
            self.assertEqual(2, info0.allocation)
            self.assertEqual(3, info0.physical)

    def test_inspect_memory_usage_with_domain_shutoff(self):
        connection = self.inspector.connection
        with mock.patch.object(connection, 'lookupByUUIDString',
                               return_value=self.domain):
            with mock.patch.object(self.domain, 'info',
                                   return_value=(5, 0, 0,
                                                 2, 999999)):
                self.assertRaises(virt_inspector.InstanceShutOffException,
                                  self.inspector.inspect_memory_usage,
                                  self.instance)

    def test_inspect_memory_usage_with_empty_stats(self):
        connection = self.inspector.connection
        with mock.patch.object(connection, 'lookupByUUIDString',
                               return_value=self.domain):
            with mock.patch.object(self.domain, 'info',
                                   return_value=(0, 0, 51200,
                                                 2, 999999)):
                with mock.patch.object(self.domain, 'memoryStats',
                                       return_value={}):
                    self.assertRaises(virt_inspector.NoDataException,
                                      self.inspector.inspect_memory_usage,
                                      self.instance)


class TestLibvirtInspectionWithError(base.BaseTestCase):

    class fakeLibvirtError(Exception):
        pass

    def setUp(self):
        super(TestLibvirtInspectionWithError, self).setUp()
        self.inspector = libvirt_inspector.LibvirtInspector()
        self.useFixture(fixtures.MonkeyPatch(
            'ceilometer.compute.virt.libvirt.inspector.'
            'LibvirtInspector._get_connection',
            self._dummy_get_connection))
        libvirt_inspector.libvirt = mock.Mock()
        libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError

    @staticmethod
    def _dummy_get_connection(*args, **kwargs):
        raise Exception('dummy')

    def test_inspect_unknown_error(self):
        self.assertRaises(virt_inspector.InspectorException,
                          self.inspector.inspect_cpus, 'foo')


class TestLibvirtInitWithError(base.BaseTestCase):

    def setUp(self):
        super(TestLibvirtInitWithError, self).setUp()
        self.inspector = libvirt_inspector.LibvirtInspector()
        libvirt_inspector.libvirt = mock.Mock()

    @mock.patch('ceilometer.compute.virt.libvirt.inspector.'
                'LibvirtInspector._get_connection',
                mock.Mock(return_value=None))
    def test_init_error(self):
        self.assertRaises(virt_inspector.NoSanityException,
                          self.inspector.check_sanity)

    @mock.patch('ceilometer.compute.virt.libvirt.inspector.'
                'LibvirtInspector._get_connection',
                mock.Mock(side_effect=virt_inspector.NoDataException))
    def test_init_exception(self):
        self.assertRaises(virt_inspector.NoDataException,
                          self.inspector.check_sanity)
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/vmware/0000775000567000056710000000000012701406364025223 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/vmware/__init__.py0000664000567000056710000000000012701406223027314 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py0000664000567000056710000001535712701406223032550 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_vmware import api
from oslotest import base

from ceilometer.compute.virt.vmware import vsphere_operations


class VsphereOperationsTest(base.BaseTestCase):

    def setUp(self):
        api_session = api.VMwareAPISession("test_server", "test_user",
                                           "test_password", 0, None,
                                           create_session=False)
        api_session._vim = mock.MagicMock()
        self._vsphere_ops = vsphere_operations.VsphereOperations(
            api_session, 1000)
        super(VsphereOperationsTest, self).setUp()

    def test_get_vm_moid(self):
        vm1_moid = "vm-1"
        vm2_moid = "vm-2"
        vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85"
        vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693"

        def construct_mock_vm_object(vm_moid, vm_instance):
            vm_object = mock.MagicMock()
            vm_object.obj.value = vm_moid
            vm_object.propSet[0].val = vm_instance
            return vm_object

        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(self._vsphere_ops._max_objects,
                             options.maxObjects)
            self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY,
                             specSet[0].pathSet[0])

            # mock return result
            vm1 = construct_mock_vm_object(vm1_moid, vm1_instance)
            vm2 = construct_mock_vm_object(vm2_moid, vm2_instance)
            result = mock.MagicMock()
            result.objects.__iter__.return_value = [vm1, vm2]
            return result

        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect
        vim_mock.ContinueRetrievePropertiesEx.return_value = None

        vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance)
        self.assertEqual(vm1_moid, vm_moid)

        vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance)
        self.assertEqual(vm2_moid, vm_moid)

    def test_query_vm_property(self):
        vm_moid = "vm-21"
        vm_property_name = "runtime.powerState"
        vm_property_val = "poweredON"

        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(vm_moid, specSet[0].obj.value)
            self.assertEqual(vm_property_name, specSet[0].pathSet[0])

            # mock return result
            result = mock.MagicMock()
            result.objects[0].propSet[0].val = vm_property_val
            return result

        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect

        actual_val = self._vsphere_ops.query_vm_property(vm_moid,
                                                         vm_property_name)
        self.assertEqual(vm_property_val, actual_val)

    def test_get_perf_counter_id(self):

        def construct_mock_counter_info(group_name, counter_name,
                                        rollup_type, counter_id):
            counter_info = mock.MagicMock()
            counter_info.groupInfo.key = group_name
            counter_info.nameInfo.key = counter_name
            counter_info.rollupType = rollup_type
            counter_info.key = counter_id
            return counter_info

        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY,
                             specSet[0].pathSet[0])

            # mock return result
            counter_info1 = construct_mock_counter_info("a", "b", "c", 1)
            counter_info2 = construct_mock_counter_info("x", "y", "z", 2)
            result = mock.MagicMock()
            (result.objects[0].propSet[0].val.PerfCounterInfo.__iter__.
             return_value) = [counter_info1, counter_info2]
            return result

        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect

        counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c")
        self.assertEqual(1, counter_id)

        counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z")
        self.assertEqual(2, counter_id)

    def test_query_vm_stats(self):
        vm_moid = "vm-21"
        device1 = "device-1"
        device2 = "device-2"
        device3 = "device-3"
        counter_id = 5

        def construct_mock_metric_series(device_name, stat_values):
            metric_series = mock.MagicMock()
            metric_series.value = stat_values
            metric_series.id.instance = device_name
            return metric_series

        def vim_query_perf_side_effect(perf_manager, querySpec):
            # assert inputs
            self.assertEqual(vm_moid, querySpec[0].entity.value)
            self.assertEqual(counter_id, querySpec[0].metricId[0].counterId)
            self.assertEqual(
                vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL,
                querySpec[0].intervalId)

            # mock return result
            perf_stats = mock.MagicMock()
            perf_stats[0].sampleInfo = ["s1", "s2", "s3"]
            perf_stats[0].value.__iter__.return_value = [
                construct_mock_metric_series(None, [111, 222, 333]),
                construct_mock_metric_series(device1, [100, 200, 300]),
                construct_mock_metric_series(device2, [10, 20, 30]),
                construct_mock_metric_series(device3, [1, 2, 3])
            ]
            return perf_stats

        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect
        ops = self._vsphere_ops

        # test aggregate stat
        stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60)
        self.assertEqual(222, stat_val)

        # test per-device(non-aggregate) stats
        expected_device_stats = {
            device1: 200,
            device2: 20,
            device3: 2
        }
        stats = ops.query_vm_device_stats(vm_moid, counter_id, 60)
        self.assertEqual(expected_device_stats, stats)
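
# test_query_vm_stats implies the reduction applied to each metric series:
# every expected value (222 from [111, 222, 333], 200 from [100, 200, 300],
# 20 from [10, 20, 30]) is the arithmetic mean of the real-time samples,
# and the aggregate series is the one whose id.instance is empty. A small
# sketch of that reduction, assuming this averaging behaviour (the helper
# names are illustrative, not VsphereOperations methods):

def average_series(stat_values):
    """Average one vCenter real-time metric series into a single value."""
    return sum(stat_values) / float(len(stat_values)) if stat_values else 0.0


def split_vm_stats(series):
    """Split (device_name, values) pairs into aggregate and per-device."""
    aggregate = None
    per_device = {}
    for device_name, values in series:
        if device_name:
            per_device[device_name] = average_series(values)
        else:
            aggregate = average_series(values)
    return aggregate, per_device


agg, devs = split_vm_stats([(None, [111, 222, 333]),
                            ("device-1", [100, 200, 300]),
                            ("device-2", [10, 20, 30])])
assert agg == 222 and devs == {"device-1": 200, "device-2": 20}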
""" import mock from oslo_vmware import api from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import inspector as vsphere_inspector class TestVsphereInspection(base.BaseTestCase): def setUp(self): api_session = api.VMwareAPISession("test_server", "test_user", "test_password", 0, None, create_session=False, port=7443) vsphere_inspector.get_api_session = mock.Mock( return_value=api_session) self._inspector = vsphere_inspector.VsphereInspector() self._inspector._ops = mock.MagicMock() super(TestVsphereInspection, self).setUp() def test_inspect_memory_usage(self): fake_instance_moid = 'fake_instance_moid' fake_instance_id = 'fake_instance_id' fake_perf_counter_id = 'fake_perf_counter_id' fake_memory_value = 1024.0 fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) def construct_mock_instance_object(fake_instance_id): instance_object = mock.MagicMock() instance_object.id = fake_instance_id return instance_object fake_instance = construct_mock_instance_object(fake_instance_id) self._inspector._ops.get_vm_moid.return_value = fake_instance_moid (self._inspector._ops. get_perf_counter_id.return_value) = fake_perf_counter_id (self._inspector._ops.query_vm_aggregate_stats. return_value) = fake_memory_value memory_stat = self._inspector.inspect_memory_usage(fake_instance) self.assertEqual(fake_stat, memory_stat) def test_inspect_cpu_util(self): fake_instance_moid = 'fake_instance_moid' fake_instance_id = 'fake_instance_id' fake_perf_counter_id = 'fake_perf_counter_id' fake_cpu_util_value = 60 fake_stat = virt_inspector.CPUUtilStats(util=60) def construct_mock_instance_object(fake_instance_id): instance_object = mock.MagicMock() instance_object.id = fake_instance_id return instance_object fake_instance = construct_mock_instance_object(fake_instance_id) self._inspector._ops.get_vm_moid.return_value = fake_instance_moid (self._inspector._ops.get_perf_counter_id. return_value) = fake_perf_counter_id (self._inspector._ops.query_vm_aggregate_stats. 
return_value) = fake_cpu_util_value * 100 cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) self.assertEqual(fake_stat, cpu_util_stat) def test_inspect_vnic_rates(self): # construct test data test_vm_moid = "vm-21" vnic1 = "vnic-1" vnic2 = "vnic-2" counter_name_to_id_map = { vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 } counter_id_to_stats_map = { 1: {vnic1: 1, vnic2: 3}, 2: {vnic1: 2, vnic2: 4}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_moid, counter_id, duration): # assert inputs self.assertEqual(test_vm_moid, vm_moid) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_vm_moid.return_value = test_vm_moid ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = self._inspector.inspect_vnic_rates(mock.MagicMock()) # validate result expected_stats = { vnic1: virt_inspector.InterfaceRateStats(1024, 2048), vnic2: virt_inspector.InterfaceRateStats(3072, 4096) } for vnic, rates_info in result: self.assertEqual(expected_stats[vnic.name], rates_info) def test_inspect_disk_rates(self): # construct test data test_vm_moid = "vm-21" disk1 = "disk-1" disk2 = "disk-2" counter_name_to_id_map = { vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 } counter_id_to_stats_map = { 1: {disk1: 1, disk2: 2}, 2: {disk1: 300, disk2: 400}, 3: {disk1: 5, disk2: 6}, 4: {disk1: 700}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_moid, counter_id, duration): # assert inputs self.assertEqual(test_vm_moid, vm_moid) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_vm_moid.return_value = test_vm_moid ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = self._inspector.inspect_disk_rates(mock.MagicMock()) # validate result expected_stats = { disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) } actual_stats = dict((disk.device, rates) for (disk, rates) in result) self.assertEqual(expected_stats, actual_stats) ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/0000775000567000056710000000000012701406364025206 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/__init__.py0000664000567000056710000000000012701406223027277 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py0000664000567000056710000001522012701406223030617 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
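
# The vmware fixtures above encode two unit conversions: vCenter reports
# network and disk rates in KB/s (1 -> 1024, 5 -> 5120 bytes/s) and CPU
# usage in hundredths of a percent (60 * 100 -> 60% util). A sketch of
# those conversions, consistent with the assertions above (the function
# names are illustrative, not the inspector's API):

def kbps_to_bytes_per_sec(kbps):
    """Convert a vCenter KB/s rate counter to bytes/s."""
    return kbps * 1024


def hundredths_pct_to_util(raw_cpu_usage):
    """Convert vCenter 'hundredths of a percent' CPU usage to percent."""
    return raw_cpu_usage / 100


assert kbps_to_bytes_per_sec(1) == 1024
assert kbps_to_bytes_per_sec(5) == 5120
assert hundredths_pct_to_util(60 * 100) == 60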
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/0000775000567000056710000000000012701406364025206 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/__init__.py0000664000567000056710000000000012701406223027277 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py0000664000567000056710000001522012701406223030617 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for xenapi inspector."""

import mock
from oslotest import base

from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector


class TestSwapXapiHost(base.BaseTestCase):

    def test_swapping(self):
        self.assertEqual(
            "http://otherserver:8765/somepath",
            xenapi_inspector.swap_xapi_host(
                "http://someserver:8765/somepath", 'otherserver'))

    def test_no_port(self):
        self.assertEqual(
            "http://otherserver/somepath",
            xenapi_inspector.swap_xapi_host(
                "http://someserver/somepath", 'otherserver'))

    def test_no_path(self):
        self.assertEqual(
            "http://otherserver",
            xenapi_inspector.swap_xapi_host("http://someserver",
                                            'otherserver'))

    def test_same_hostname_path(self):
        self.assertEqual(
            "http://other:80/some",
            xenapi_inspector.swap_xapi_host("http://some:80/some", 'other'))


class TestXenapiInspection(base.BaseTestCase):

    def setUp(self):
        api_session = mock.Mock()
        xenapi_inspector.get_api_session = mock.Mock(
            return_value=api_session)
        self.inspector = xenapi_inspector.XenapiInspector()

        super(TestXenapiInspection, self).setUp()

    def test_inspect_cpu_util(self):
        fake_instance = {
            'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
            'id': 'fake_instance_id'}
        fake_stat = virt_inspector.CPUUtilStats(util=40)

        def fake_xenapi_request(method, args):
            metrics_rec = {
                'memory_actual': '536870912',
                'VCPUs_number': '1',
                'VCPUs_utilisation': {'0': 0.4, }
            }

            if method == 'VM.get_by_name_label':
                return ['vm_ref']
            elif method == 'VM.get_metrics':
                return 'metrics_ref'
            elif method == 'VM_metrics.get_record':
                return metrics_rec
            else:
                return None

        session = self.inspector.session
        with mock.patch.object(session, 'xenapi_request',
                               side_effect=fake_xenapi_request):
            cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance)
            self.assertEqual(fake_stat, cpu_util_stat)

    def test_inspect_memory_usage(self):
        fake_instance = {
            'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
            'id': 'fake_instance_id'}
        fake_stat = virt_inspector.MemoryUsageStats(usage=128)

        def fake_xenapi_request(method, args):
            metrics_rec = {
                'memory_actual': '134217728',
            }

            if method == 'VM.get_by_name_label':
                return ['vm_ref']
            elif method == 'VM.get_metrics':
                return 'metrics_ref'
            elif method == 'VM_metrics.get_record':
                return metrics_rec
            else:
                return None

        session = self.inspector.session
        with mock.patch.object(session, 'xenapi_request',
                               side_effect=fake_xenapi_request):
            memory_stat = self.inspector.inspect_memory_usage(fake_instance)
            self.assertEqual(fake_stat, memory_stat)

    def test_inspect_vnic_rates(self):
        fake_instance = {
            'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
            'id': 'fake_instance_id'}

        def fake_xenapi_request(method, args):
            vif_rec = {
                'metrics': 'vif_metrics_ref',
                'uuid': 'vif_uuid',
                'MAC': 'vif_mac',
            }

            vif_metrics_rec = {
                'io_read_kbs': '1',
                'io_write_kbs': '2',
            }
            if method == 'VM.get_by_name_label':
                return ['vm_ref']
            elif method == 'VM.get_VIFs':
                return ['vif_ref']
            elif method == 'VIF.get_record':
                return vif_rec
            elif method == 'VIF.get_metrics':
                return 'vif_metrics_ref'
            elif method == 'VIF_metrics.get_record':
                return vif_metrics_rec
            else:
                return None

        session = self.inspector.session
        with mock.patch.object(session, 'xenapi_request',
                               side_effect=fake_xenapi_request):
            interfaces = list(
                self.inspector.inspect_vnic_rates(fake_instance))

            self.assertEqual(1, len(interfaces))
            vnic0, info0 = interfaces[0]
            self.assertEqual('vif_uuid', vnic0.name)
            self.assertEqual('vif_mac', vnic0.mac)
            self.assertEqual(1024, info0.rx_bytes_rate)
            self.assertEqual(2048, info0.tx_bytes_rate)

    def test_inspect_disk_rates(self):
        fake_instance = {
            'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
            'id': 'fake_instance_id'}

        def fake_xenapi_request(method, args):
            vbd_rec = {
                'device': 'xvdd'
            }

            vbd_metrics_rec = {
                'io_read_kbs': '1',
                'io_write_kbs': '2'
            }
            if method == 'VM.get_by_name_label':
                return ['vm_ref']
            elif method == 'VM.get_VBDs':
                return ['vbd_ref']
            elif method == 'VBD.get_record':
                return vbd_rec
            elif method == 'VBD.get_metrics':
                return 'vbd_metrics_ref'
            elif method == 'VBD_metrics.get_record':
                return vbd_metrics_rec
            else:
                return None

        session = self.inspector.session
        with mock.patch.object(session, 'xenapi_request',
                               side_effect=fake_xenapi_request):
            disks = list(self.inspector.inspect_disk_rates(fake_instance))

            self.assertEqual(1, len(disks))
            disk0, info0 = disks[0]
            self.assertEqual('xvdd', disk0.device)
            self.assertEqual(1024, info0.read_bytes_rate)
            self.assertEqual(2048, info0.write_bytes_rate)
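
# TestSwapXapiHost pins down the URL rewriting rule: only the hostname is
# replaced, while scheme, port and path are preserved. One way to satisfy
# that contract with the standard library (a sketch under that assumption,
# not necessarily how xenapi_inspector.swap_xapi_host is written):

from six.moves.urllib import parse as urlparse


def swap_host(url, new_host):
    """Replace the hostname in url, keeping scheme, port and path."""
    parts = urlparse.urlsplit(url)
    netloc = new_host if parts.port is None else '%s:%d' % (new_host,
                                                            parts.port)
    return urlparse.urlunsplit(
        (parts.scheme, netloc, parts.path, parts.query, parts.fragment))


assert swap_host("http://someserver:8765/somepath",
                 "otherserver") == "http://otherserver:8765/somepath"
assert swap_host("http://someserver", "otherserver") == "http://otherserver"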
""" import sys import mock from os_win import exceptions as os_win_exc from oslo_utils import units from oslotest import base from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector from ceilometer.compute.virt import inspector as virt_inspector class TestHyperVInspection(base.BaseTestCase): @mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock()) @mock.patch.object(hyperv_inspector.HyperVInspector, '_compute_host_max_cpu_clock') def setUp(self, mock_compute_host_cpu_clock): self._inspector = hyperv_inspector.HyperVInspector() self._inspector._utils = mock.MagicMock() super(TestHyperVInspection, self).setUp() def test_converted_exception(self): self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.OSWinException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_cpus, mock.sentinel.instance) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.HyperVException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_cpus, mock.sentinel.instance) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.NotFound(resource='foofoo')) self.assertRaises(virt_inspector.InstanceNotFoundException, self._inspector.inspect_cpus, mock.sentinel.instance) def test_assert_original_traceback_maintained(self): def bar(self): foo = "foofoo" raise os_win_exc.NotFound(resource=foo) self._inspector._utils.get_cpu_metrics.side_effect = bar try: self._inspector.inspect_cpus(mock.sentinel.instance) self.fail("Test expected exception, but it was not raised.") except virt_inspector.InstanceNotFoundException: # exception has been raised as expected. _, _, trace = sys.exc_info() while trace.tb_next: # iterate until the original exception source, bar. trace = trace.tb_next # original frame will contain the 'foo' variable. 
self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) @mock.patch.object(hyperv_inspector, 'utilsfactory') def test_compute_host_max_cpu_clock(self, mock_utilsfactory): mock_cpu = {'MaxClockSpeed': 1000} hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info hostutils.return_value = [mock_cpu, mock_cpu] cpu_clock = self._inspector._compute_host_max_cpu_clock() self.assertEqual(2000.0, cpu_clock) def test_inspect_cpus(self): fake_instance_name = 'fake_instance_name' fake_cpu_clock_used = 2000 fake_cpu_count = 3000 fake_uptime = 4000 self._inspector._host_max_cpu_clock = 4000.0 fake_cpu_percent_used = (fake_cpu_clock_used / self._inspector._host_max_cpu_clock) fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * 1000) self._inspector._utils.get_cpu_metrics.return_value = ( fake_cpu_clock_used, fake_cpu_count, fake_uptime) cpu_stats = self._inspector.inspect_cpus(fake_instance_name) self.assertEqual(fake_cpu_count, cpu_stats.number) self.assertEqual(fake_cpu_time, cpu_stats.time) def test_inspect_memory_usage(self): fake_usage = self._inspector._utils.get_memory_metrics.return_value usage = self._inspector.inspect_memory_usage( mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION) self.assertEqual(fake_usage, usage.usage) def test_inspect_vnics(self): fake_instance_name = 'fake_instance_name' fake_rx_mb = 1000 fake_tx_mb = 2000 fake_element_name = 'fake_element_name' fake_address = 'fake_address' self._inspector._utils.get_vnic_metrics.return_value = [{ 'rx_mb': fake_rx_mb, 'tx_mb': fake_tx_mb, 'element_name': fake_element_name, 'address': fake_address}] inspected_vnics = list(self._inspector.inspect_vnics( fake_instance_name)) self.assertEqual(1, len(inspected_vnics)) self.assertEqual(2, len(inspected_vnics[0])) inspected_vnic, inspected_stats = inspected_vnics[0] self.assertEqual(fake_element_name, inspected_vnic.name) self.assertEqual(fake_address, inspected_vnic.mac) self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) def test_inspect_disks(self): fake_instance_name = 'fake_instance_name' fake_read_mb = 1000 fake_write_mb = 2000 fake_instance_id = "fake_fake_instance_id" fake_host_resource = "fake_host_resource" self._inspector._utils.get_disk_metrics.return_value = [{ 'read_mb': fake_read_mb, 'write_mb': fake_write_mb, 'instance_id': fake_instance_id, 'host_resource': fake_host_resource}] inspected_disks = list(self._inspector.inspect_disks( fake_instance_name)) self.assertEqual(1, len(inspected_disks)) self.assertEqual(2, len(inspected_disks[0])) inspected_disk, inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_disk.device) self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes) self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) def test_inspect_disk_latency(self): fake_instance_name = mock.sentinel.INSTANCE_NAME fake_disk_latency = mock.sentinel.DISK_LATENCY fake_instance_id = mock.sentinel.INSTANCE_ID self._inspector._utils.get_disk_latency_metrics.return_value = [{ 'disk_latency': fake_disk_latency, 'instance_id': fake_instance_id}] inspected_disks = list(self._inspector.inspect_disk_latency( fake_instance_name)) self.assertEqual(1, len(inspected_disks)) self.assertEqual(2, len(inspected_disks[0])) inspected_disk, inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_disk.device) self.assertEqual(fake_disk_latency, inspected_stats.disk_latency) def 
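
    # test_inspect_cpus above pins down the Hyper-V CPU accounting model:
    # the host's max clock is the per-core MaxClockSpeed summed over all
    # cores (2 x 1000 = 2000.0 in test_compute_host_max_cpu_clock), and the
    # guest's CPU time is its uptime scaled by the fraction of that clock
    # the guest consumed, times 1000. With the fixture values:
    # 2000 / 4000.0 = 0.5 and int(4000 * 0.5) * 1000 = 2000000, which is
    # exactly the value the test asserts for cpu_stats.time.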
    def test_inspect_disk_iops_count(self):
        fake_instance_name = mock.sentinel.INSTANCE_NAME
        fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT
        fake_instance_id = mock.sentinel.INSTANCE_ID

        self._inspector._utils.get_disk_iops_count.return_value = [{
            'iops_count': fake_disk_iops_count,
            'instance_id': fake_instance_id}]

        inspected_disks = list(self._inspector.inspect_disk_iops(
            fake_instance_name))

        self.assertEqual(1, len(inspected_disks))
        self.assertEqual(2, len(inspected_disks[0]))

        inspected_disk, inspected_stats = inspected_disks[0]

        self.assertEqual(fake_instance_id, inspected_disk.device)
        self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count)
ceilometer-6.0.0/ceilometer/tests/unit/test_messaging.py0000664000567000056710000000473012701406223024646 0ustar jenkinsjenkins00000000000000
# Copyright (C) 2014 eNovance SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import fixture as fixture_config
import oslo_messaging.conffixture
from oslotest import base

from ceilometer import messaging


class MessagingTests(base.BaseTestCase):
    def setUp(self):
        super(MessagingTests, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF))

    def test_get_transport_invalid_url(self):
        self.assertRaises(oslo_messaging.InvalidTransportURL,
                          messaging.get_transport, "notvalid!")

    def test_get_transport_url_caching(self):
        t1 = messaging.get_transport('fake://')
        t2 = messaging.get_transport('fake://')
        self.assertEqual(t1, t2)

    def test_get_transport_default_url_caching(self):
        t1 = messaging.get_transport()
        t2 = messaging.get_transport()
        self.assertEqual(t1, t2)

    def test_get_transport_default_url_no_caching(self):
        t1 = messaging.get_transport(cache=False)
        t2 = messaging.get_transport(cache=False)
        self.assertNotEqual(t1, t2)

    def test_get_transport_url_no_caching(self):
        t1 = messaging.get_transport('fake://', cache=False)
        t2 = messaging.get_transport('fake://', cache=False)
        self.assertNotEqual(t1, t2)

    def test_get_transport_default_url_caching_mix(self):
        t1 = messaging.get_transport()
        t2 = messaging.get_transport(cache=False)
        self.assertNotEqual(t1, t2)

    def test_get_transport_url_caching_mix(self):
        t1 = messaging.get_transport('fake://')
        t2 = messaging.get_transport('fake://', cache=False)
        self.assertNotEqual(t1, t2)

    def test_get_transport_optional(self):
        self.CONF.set_override('rpc_backend', '')
        self.assertIsNone(messaging.get_transport(optional=True,
                                                  cache=False))
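
# The caching tests above define get_transport's contract: transports are
# memoized per URL (the default URL is keyed separately) unless cache=False
# forces a fresh instance. A minimal sketch of that memoization pattern
# (illustrative only, not ceilometer.messaging itself):

_TRANSPORTS = {}


def get_cached(url, factory, cache=True):
    """Return a per-url singleton built by factory; cache=False bypasses."""
    if not cache:
        return factory(url)
    if url not in _TRANSPORTS:
        _TRANSPORTS[url] = factory(url)
    return _TRANSPORTS[url]


make_transport = lambda url: object()  # stand-in for the real constructor
t1 = get_cached('fake://', make_transport)
t2 = get_cached('fake://', make_transport)
t3 = get_cached('fake://', make_transport, cache=False)
assert t1 is t2 and t1 is not t3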
ceilometer-6.0.0/ceilometer/tests/unit/objectstore/0000775000567000056710000000000012701406364023605 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/objectstore/test_swift.py0000664000567000056710000002253212701406223026350 0ustar jenkinsjenkins00000000000000
# Copyright 2012 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

from keystoneauth1 import exceptions
import mock
from oslotest import base
from oslotest import mockpatch
from swiftclient import client as swift_client
import testscenarios.testcase

from ceilometer.agent import manager
from ceilometer.objectstore import swift

HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12,
                                 'x-account-bytes-used': 321321321,
                                 'x-account-container-count': 7,
                                 }),
                 ('tenant-001', {'x-account-object-count': 34,
                                 'x-account-bytes-used': 9898989898,
                                 'x-account-container-count': 17,
                                 }),
                 ('tenant-002-ignored', {'x-account-object-count': 34,
                                         'x-account-bytes-used': 9898989898,
                                         'x-account-container-count': 17,
                                         })]

GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10,
                                 'x-account-bytes-used': 123123,
                                 'x-account-container-count': 2,
                                 },
                                [{'count': 10,
                                  'bytes': 123123,
                                  'name': 'my_container'},
                                 {'count': 0,
                                  'bytes': 0,
                                  'name': 'new_container'
                                  }])),
                ('tenant-001', ({'x-account-object-count': 0,
                                 'x-account-bytes-used': 0,
                                 'x-account-container-count': 0,
                                 }, [])),
                ('tenant-002-ignored', ({'x-account-object-count': 0,
                                         'x-account-bytes-used': 0,
                                         'x-account-container-count': 0,
                                         }, []))]

Tenant = collections.namedtuple('Tenant', 'id')
ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')]


class TestManager(manager.AgentManager):

    def __init__(self):
        super(TestManager, self).__init__()
        self._keystone = mock.MagicMock()
        self._keystone_last_exception = None
        self._service_catalog = (self._keystone.session.auth.
                                 get_access.return_value.service_catalog)
        self._auth_token = (self._keystone.session.auth.
                            get_access.return_value.auth_token)


class TestSwiftPollster(testscenarios.testcase.WithScenarios,
                        base.BaseTestCase):

    # Define scenarios to run all of the tests against all of the
    # pollsters.
    scenarios = [
        ('storage.objects',
         {'factory': swift.ObjectsPollster}),
        ('storage.objects.size',
         {'factory': swift.ObjectsSizePollster}),
        ('storage.objects.containers',
         {'factory': swift.ObjectsContainersPollster}),
        ('storage.containers.objects',
         {'factory': swift.ContainersObjectsPollster}),
        ('storage.containers.objects.size',
         {'factory': swift.ContainersSizePollster}),
    ]

    @staticmethod
    def fake_ks_service_catalog_url_for(*args, **kwargs):
        raise exceptions.EndpointNotFound("Fake keystone exception")

    def fake_iter_accounts(self, ksclient, cache, tenants):
        tenant_ids = [t.id for t in tenants]
        for i in self.ACCOUNTS:
            if i[0] in tenant_ids:
                yield i

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        super(TestSwiftPollster, self).setUp()
        self.pollster = self.factory()
        self.manager = TestManager()

        if self.pollster.CACHE_KEY_METHOD == 'swift.head_account':
            self.ACCOUNTS = HEAD_ACCOUNTS
        else:
            self.ACCOUNTS = GET_ACCOUNTS

    def tearDown(self):
        super(TestSwiftPollster, self).tearDown()
        swift._Base._ENDPOINT = None

    def test_iter_accounts_no_cache(self):
        cache = {}
        with mockpatch.PatchObject(self.factory, '_get_account_info',
                                   return_value=[]):
            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
                                                     ASSIGNED_TENANTS))

        self.assertIn(self.pollster.CACHE_KEY_METHOD, cache)
        self.assertEqual([], data)

    def test_iter_accounts_cached(self):
        # Verify that if a method has already been called, _iter_accounts
        # uses the cached version and doesn't call swiftclient.
        mock_method = mock.Mock()
        mock_method.side_effect = AssertionError(
            'should not be called',
        )

        api_method = '%s_account' % self.pollster.METHOD
        with mockpatch.PatchObject(swift_client, api_method,
                                   new=mock_method):
            with mockpatch.PatchObject(self.factory, '_neaten_url'):
                cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]}
                data = list(self.pollster._iter_accounts(mock.Mock(), cache,
                                                         ASSIGNED_TENANTS))
        self.assertEqual([self.ACCOUNTS[0]], data)

    def test_neaten_url(self):
        test_endpoints = ['http://127.0.0.1:8080',
                          'http://127.0.0.1:8080/swift']
        test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b'
        for test_endpoint in test_endpoints:
            standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id

            url = swift._Base._neaten_url(test_endpoint, test_tenant_id)
            self.assertEqual(standard_url, url)
            url = swift._Base._neaten_url(test_endpoint + '/',
                                          test_tenant_id)
            self.assertEqual(standard_url, url)
            url = swift._Base._neaten_url(test_endpoint + '/v1',
                                          test_tenant_id)
            self.assertEqual(standard_url, url)
            url = swift._Base._neaten_url(standard_url, test_tenant_id)
            self.assertEqual(standard_url, url)

    def test_metering(self):
        with mockpatch.PatchObject(self.factory, '_iter_accounts',
                                   side_effect=self.fake_iter_accounts):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(2, len(samples), self.pollster.__class__)

    def test_get_meter_names(self):
        with mockpatch.PatchObject(self.factory, '_iter_accounts',
                                   side_effect=self.fake_iter_accounts):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(set([samples[0].name]),
                         set([s.name for s in samples]))

    def test_only_poll_assigned(self):
        mock_method = mock.MagicMock()
        endpoint = 'end://point/'
        api_method = '%s_account' % self.pollster.METHOD
        with mockpatch.PatchObject(swift_client, api_method,
                                   new=mock_method):
            with mockpatch.PatchObject(
                    self.manager._service_catalog, 'url_for',
                    return_value=endpoint):
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
        expected = [mock.call(self.pollster._neaten_url(endpoint, t.id),
                              self.manager._auth_token)
                    for t in ASSIGNED_TENANTS]
        self.assertEqual(expected, mock_method.call_args_list)

    def test_get_endpoint_only_once(self):
        endpoint = 'end://point/'
        mock_url_for = mock.MagicMock(return_value=endpoint)
        api_method = '%s_account' % self.pollster.METHOD
        with mockpatch.PatchObject(swift_client, api_method,
                                   new=mock.MagicMock()):
            with mockpatch.PatchObject(
                    self.manager._service_catalog, 'url_for',
                    new=mock_url_for):
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
        self.assertEqual(1, mock_url_for.call_count)

    def test_endpoint_notfound(self):
        with mockpatch.PatchObject(
                self.manager._service_catalog, 'url_for',
                side_effect=self.fake_ks_service_catalog_url_for):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(0, len(samples))
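
# test_neaten_url enumerates the normalization cases: an endpoint with or
# without a trailing slash, with or without a '/v1' suffix, and an
# already-normalized URL must all collapse to
# '<endpoint>/v1/AUTH_<tenant_id>'. A sketch of a normalizer satisfying
# exactly those cases (illustrative, not the swift pollster's private
# helper verbatim):

def neaten_url(endpoint, tenant_id):
    """Normalize a swift endpoint to <endpoint>/v1/AUTH_<tenant_id>."""
    path_suffix = '/v1/AUTH_' + tenant_id
    url = endpoint.rstrip('/')
    for suffix in (path_suffix, '/v1'):
        if url.endswith(suffix):
            url = url[:-len(suffix)]
            break
    return url + path_suffix


base_url = 'http://127.0.0.1:8080/swift'
expected = base_url + '/v1/AUTH_abc'
for variant in (base_url, base_url + '/', base_url + '/v1', expected):
    assert neaten_url(variant, 'abc') == expected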
"create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}], "total": { "bytes_sent": 46, "bytes_received": 238, "ops": 7, "successful_ops": 6}}]} ''' buckets_json = json.loads(RGW_ADMIN_BUCKETS) usage_json = json.loads(RGW_ADMIN_USAGE) class TestRGWAdminClient(base.BaseTestCase): def setUp(self): super(TestRGWAdminClient, self).setUp() self.client = RGWAdminClient('http://127.0.0.1:8080/admin', 'abcde', 'secret') self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() def test_make_request_exception(self): self.get_resp.status_code = 403 self.assertRaises(RGWAdminAPIFailed, self.client._make_request, *('foo', {})) def test_make_request(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client._make_request('foo', []) self.assertEqual(buckets_json, actual) def test_get_buckets(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client.get_bucket('foo') bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), RGWAdminClient.Bucket('somefoo31', 1, 42), ] expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) def test_get_usage(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = usage_json actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) ceilometer-6.0.0/ceilometer/tests/unit/objectstore/test_rgw.py0000664000567000056710000001634312701406223026016 0ustar jenkinsjenkins00000000000000# Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
ceilometer-6.0.0/ceilometer/tests/unit/objectstore/test_rgw.py0000664000567000056710000001634312701406223026016 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Reliance Jio Infocomm Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

from keystoneauth1 import exceptions
import mock
from oslotest import base
from oslotest import mockpatch
import testscenarios.testcase

from ceilometer.agent import manager
from ceilometer.objectstore import rgw
from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client

bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)]
bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)]
bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)]

GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042,
                               'num_objects': 1001,
                               'buckets': bucket_list1}),
               ('tenant-001', {'num_buckets': 2, 'size': 1042,
                               'num_objects': 1001,
                               'buckets': bucket_list2}),
               ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042,
                                       'num_objects': 1001,
                                       'buckets': bucket_list3})]

GET_USAGE = [('tenant-000', 10),
             ('tenant-001', 11),
             ('tenant-002-ignored', 12)]

Tenant = collections.namedtuple('Tenant', 'id')
ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')]


class TestManager(manager.AgentManager):

    def __init__(self):
        super(TestManager, self).__init__()
        self._keystone = mock.Mock()
        self._catalog = (self._keystone.session.auth.get_access.
                         return_value.service_catalog)
        self._catalog.url_for.return_value = 'http://foobar/endpoint'


class TestRgwPollster(testscenarios.testcase.WithScenarios,
                      base.BaseTestCase):

    # Define scenarios to run all of the tests against all of the
    # pollsters.
    scenarios = [
        ('radosgw.objects',
         {'factory': rgw.ObjectsPollster}),
        ('radosgw.objects.size',
         {'factory': rgw.ObjectsSizePollster}),
        ('radosgw.objects.containers',
         {'factory': rgw.ObjectsContainersPollster}),
        ('radosgw.containers.objects',
         {'factory': rgw.ContainersObjectsPollster}),
        ('radosgw.containers.objects.size',
         {'factory': rgw.ContainersSizePollster}),
        ('radosgw.api.request',
         {'factory': rgw.UsagePollster}),
    ]

    @staticmethod
    def fake_ks_service_catalog_url_for(*args, **kwargs):
        raise exceptions.EndpointNotFound("Fake keystone exception")

    def fake_iter_accounts(self, ksclient, cache, tenants):
        tenant_ids = [t.id for t in tenants]
        for i in self.ACCOUNTS:
            if i[0] in tenant_ids:
                yield i

    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def setUp(self):
        super(TestRgwPollster, self).setUp()
        self.pollster = self.factory()
        self.manager = TestManager()

        if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket':
            self.ACCOUNTS = GET_BUCKETS
        else:
            self.ACCOUNTS = GET_USAGE

    def tearDown(self):
        super(TestRgwPollster, self).tearDown()
        rgw._Base._ENDPOINT = None

    def test_iter_accounts_no_cache(self):
        cache = {}
        with mockpatch.PatchObject(self.factory, '_get_account_info',
                                   return_value=[]):
            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
                                                     ASSIGNED_TENANTS))

        self.assertIn(self.pollster.CACHE_KEY_METHOD, cache)
        self.assertEqual([], data)

    def test_iter_accounts_cached(self):
        # Verify that if a method has already been called, _iter_accounts
        # uses the cached version and doesn't call rgw_client.
        mock_method = mock.Mock()
        mock_method.side_effect = AssertionError(
            'should not be called',
        )

        api_method = 'get_%s' % self.pollster.METHOD
        with mockpatch.PatchObject(rgw_client, api_method, new=mock_method):
            cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]}
            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
                                                     ASSIGNED_TENANTS))
        self.assertEqual([self.ACCOUNTS[0]], data)

    def test_metering(self):
        with mockpatch.PatchObject(self.factory, '_iter_accounts',
                                   side_effect=self.fake_iter_accounts):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(2, len(samples), self.pollster.__class__)

    def test_get_meter_names(self):
        with mockpatch.PatchObject(self.factory, '_iter_accounts',
                                   side_effect=self.fake_iter_accounts):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(set([samples[0].name]),
                         set([s.name for s in samples]))

    def test_only_poll_assigned(self):
        mock_method = mock.MagicMock()
        endpoint = 'http://127.0.0.1:8000/admin'
        api_method = 'get_%s' % self.pollster.METHOD
        with mockpatch.PatchObject(rgw_client, api_method, new=mock_method):
            with mockpatch.PatchObject(
                    self.manager._catalog, 'url_for',
                    return_value=endpoint):
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
        expected = [mock.call(t.id) for t in ASSIGNED_TENANTS]
        self.assertEqual(expected, mock_method.call_args_list)

    def test_get_endpoint_only_once(self):
        mock_url_for = mock.MagicMock()
        mock_url_for.return_value = '/endpoint'
        api_method = 'get_%s' % self.pollster.METHOD
        with mockpatch.PatchObject(rgw_client, api_method,
                                   new=mock.MagicMock()):
            with mockpatch.PatchObject(
                    self.manager._catalog, 'url_for',
                    new=mock_url_for):
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
                list(self.pollster.get_samples(self.manager, {},
                                               ASSIGNED_TENANTS))
        self.assertEqual(1, mock_url_for.call_count)

    def test_endpoint_notfound(self):
        with mockpatch.PatchObject(
                self.manager._catalog, 'url_for',
                side_effect=self.fake_ks_service_catalog_url_for):
            samples = list(self.pollster.get_samples(self.manager, {},
                                                     ASSIGNED_TENANTS))

        self.assertEqual(0, len(samples))
ceilometer-6.0.0/ceilometer/tests/unit/objectstore/__init__.py0000664000567000056710000000000012701406223025676 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/agent/0000775000567000056710000000000012701406364022360 5ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/agent/__init__.py0000664000567000056710000000000012701406223024451 0ustar jenkinsjenkins00000000000000
ceilometer-6.0.0/ceilometer/tests/unit/agent/agentbase.py0000664000567000056710000007322412701406223024665 0ustar jenkinsjenkins00000000000000
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 Intel corp.
# Copyright 2013 eNovance
# Copyright 2014 Red Hat, Inc
#
# Authors: Yunhong Jiang
#          Julien Danjou
#          Eoghan Glynn
#          Nejc Saje
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc import copy import datetime import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch import six from stevedore import extension from ceilometer.agent import plugin_base from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import sample from ceilometer.tests import base from ceilometer import utils class TestSample(sample.Sample): def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source=None): super(TestSample, self).__init__(name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source) def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other): return not self.__eq__(other) default_test_data = TestSample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'Pollster'}, ) class TestPollster(plugin_base.PollsterBase): test_data = default_test_data discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) c = copy.deepcopy(self.test_data) c.resource_metadata['resources'] = resources return [c] class BatchTestPollster(TestPollster): test_data = default_test_data discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) for resource in resources: c = copy.deepcopy(self.test_data) c.timestamp = datetime.datetime.utcnow().isoformat() c.resource_id = resource c.resource_metadata['resource'] = resource yield c class TestPollsterException(TestPollster): def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) raise Exception() class TestDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) return self.resources class TestDiscoveryException(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) raise Exception() @six.add_metaclass(abc.ABCMeta) class BaseAgentManagerTestCase(base.BaseTestCase): class Pollster(TestPollster): samples = [] resources = [] test_data = default_test_data class BatchPollster(BatchTestPollster): samples = [] resources = [] test_data = default_test_data class PollsterAnother(TestPollster): samples = [] resources = [] test_data = TestSample( name='testanother', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class PollsterException(TestPollsterException): samples = [] resources = [] test_data = TestSample( name='testexception', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, 
timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class PollsterExceptionAnother(TestPollsterException): samples = [] resources = [] test_data = TestSample( name='testexceptionanother', type=default_test_data.type, unit=default_test_data.unit, volume=default_test_data.volume, user_id=default_test_data.user_id, project_id=default_test_data.project_id, resource_id=default_test_data.resource_id, timestamp=default_test_data.timestamp, resource_metadata=default_test_data.resource_metadata) class Discovery(TestDiscovery): params = [] resources = [] class DiscoveryAnother(TestDiscovery): params = [] resources = [] @property def group_id(self): return 'another_group' class DiscoveryException(TestDiscoveryException): params = [] def setup_polling(self): self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) def create_extension_list(self): return [extension.Extension('test', None, None, self.Pollster(), ), extension.Extension('testbatch', None, None, self.BatchPollster(), ), extension.Extension('testanother', None, None, self.PollsterAnother(), ), extension.Extension('testexception', None, None, self.PollsterException(), ), extension.Extension('testexceptionanother', None, None, self.PollsterExceptionAnother(), )] def create_discovery_manager(self): return extension.ExtensionManager.make_test_instance( [ extension.Extension( 'testdiscovery', None, None, self.Discovery(), ), extension.Extension( 'testdiscoveryanother', None, None, self.DiscoveryAnother(), ), extension.Extension( 'testdiscoveryexception', None, None, self.DiscoveryException(), ), ], ) @abc.abstractmethod def create_manager(self): """Return subclass specific manager.""" @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) def setUp(self): super(BaseAgentManagerTestCase, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override( 'pipeline_cfg_file', self.path_get('etc/ceilometer/pipeline.yaml') ) self.CONF(args=[]) self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() self.mgr.partition_coordinator = mock.MagicMock() fake_subset = lambda _, x: x p_coord = self.mgr.partition_coordinator p_coord.extract_my_subset.side_effect = fake_subset self.mgr.tg = mock.MagicMock() self.pipeline_cfg = { 'sources': [{ 'name': 'test_pipeline', 'interval': 60, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling() self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) @staticmethod def get_publisher(url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'rpc://': test_publisher.TestPublisher} return fake_drivers[url](url) def tearDown(self): self.Pollster.samples = [] self.Pollster.discovery = [] self.PollsterAnother.samples = [] self.PollsterAnother.discovery = [] self.PollsterException.samples = [] self.PollsterException.discovery = [] self.PollsterExceptionAnother.samples = [] self.PollsterExceptionAnother.discovery = [] self.Pollster.resources = [] self.PollsterAnother.resources = [] self.PollsterException.resources = [] self.PollsterExceptionAnother.resources = [] self.Discovery.params = [] self.DiscoveryAnother.params = [] self.DiscoveryException.params = [] self.Discovery.resources = [] self.DiscoveryAnother.resources = [] super(BaseAgentManagerTestCase, 
self).tearDown() @mock.patch('ceilometer.pipeline.setup_polling') def test_start(self, setup_polling): self.mgr.join_partitioning_groups = mock.MagicMock() self.mgr.setup_polling_tasks = mock.MagicMock() self.CONF.set_override('heartbeat', 1.0, group='coordination') self.mgr.start() setup_polling.assert_called_once_with() self.mgr.partition_coordinator.start.assert_called_once_with() self.mgr.join_partitioning_groups.assert_called_once_with() self.mgr.setup_polling_tasks.assert_called_once_with() timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) self.mgr.stop() self.mgr.partition_coordinator.stop.assert_called_once_with() @mock.patch('ceilometer.pipeline.setup_polling') def test_start_with_pipeline_poller(self, setup_polling): self.mgr.join_partitioning_groups = mock.MagicMock() self.mgr.setup_polling_tasks = mock.MagicMock() self.CONF.set_override('heartbeat', 1.0, group='coordination') self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 5) self.mgr.start() setup_polling.assert_called_once_with() self.mgr.partition_coordinator.start.assert_called_once_with() self.mgr.join_partitioning_groups.assert_called_once_with() self.mgr.setup_polling_tasks.assert_called_once_with() timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) self.assertEqual([timer_call, pipeline_poller_call], self.mgr.tg.add_timer.call_args_list) def test_join_partitioning_groups(self): self.mgr.discovery_manager = self.create_discovery_manager() self.mgr.join_partitioning_groups() p_coord = self.mgr.partition_coordinator static_group_ids = [utils.hash_of_set(p['resources']) for p in self.pipeline_cfg['sources'] if p['resources']] expected = [mock.call(self.mgr.construct_group_id(g)) for g in ['another_group', 'global'] + static_group_ids] self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) for c in expected: self.assertIn(c, p_coord.join_group.call_args_list) def test_setup_polling_tasks(self): polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) per_task_resources = polling_tasks[60].resources self.assertEqual(1, len(per_task_resources)) self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), set(per_task_resources['test_pipeline-test'].get({}))) def test_setup_polling_tasks_multiple_interval(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 10, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink'] }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(2, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertIn(10, polling_tasks.keys()) def test_setup_polling_tasks_mismatch_counter(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 10, 'meters': ['test_invalid'], 'resources': ['invalid://'], 'sinks': ['test_sink'] }) polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertNotIn(10, polling_tasks.keys()) def test_setup_polling_task_same_interval(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 60, 'meters': ['testanother'], 'resources': ['testanother://'] if self.source_resources else [], 'sinks': ['test_sink'] }) 
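        # Both sources above share the 60-second interval, so the manager
        # is expected to collapse them into a single polling task keyed on
        # that interval, while keeping per-source resource sets separate.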
self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) pollsters = polling_tasks.get(60).pollster_matches self.assertEqual(2, len(pollsters)) per_task_resources = polling_tasks[60].resources self.assertEqual(2, len(per_task_resources)) key = 'test_pipeline-test' self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), set(per_task_resources[key].get({}))) key = 'test_pipeline_1-testanother' self.assertEqual(set(self.pipeline_cfg['sources'][1]['resources']), set(per_task_resources[key].get({}))) def test_agent_manager_start(self): mgr = self.create_manager() mgr.extensions = self.mgr.extensions mgr.create_polling_task = mock.MagicMock() mgr.tg = mock.MagicMock() mgr.start() self.assertTrue(mgr.tg.add_timer.called) def test_manager_exception_persistency(self): self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline_1', 'interval': 60, 'meters': ['testanother'], 'sinks': ['test_sink'] }) self.setup_polling() def _verify_discovery_params(self, expected): self.assertEqual(expected, self.Discovery.params) self.assertEqual(expected, self.DiscoveryAnother.params) self.assertEqual(expected, self.DiscoveryException.params) def _do_test_per_pollster_discovery(self, discovered_resources, static_resources): self.Pollster.discovery = 'testdiscovery' self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] if static_resources: # just so we can test that static + pre_pipeline amalgamated # override per_pollster self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) if static_resources: self.assertEqual(set(static_resources + self.DiscoveryAnother.resources), set(self.Pollster.resources)) else: self.assertEqual(set(self.Discovery.resources), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pollster_discovery(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], []) def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): # ensure static+per_source_discovery overrides per_pollster_discovery self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pollster_discovery_duplicated(self): self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], []) def test_per_pollster_discovery_overridden_by_duplicated_static(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'dup', 'dup']) def test_per_pollster_discovery_caching(self): # ensure single discovery associated with multiple pollsters # only called once per polling cycle discovered_resources = ['discovered_1', 'discovered_2'] self.Pollster.discovery = 'testdiscovery' self.PollsterAnother.discovery = 'testdiscovery' self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.pipeline_cfg['sources'][0]['meters'].append('testanother') self.pipeline_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) 
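        # Discovery results are cached per polling cycle, so even though
        # two pollsters share 'testdiscovery', the driver is invoked only
        # once and both pollsters see the same discovered resource list.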
self.assertEqual(1, len(self.Discovery.params)) self.assertEqual(discovered_resources, self.Pollster.resources) self.assertEqual(discovered_resources, self.PollsterAnother.resources) def _do_test_per_pipeline_discovery(self, discovered_resources, static_resources): self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) discovery = self.Discovery.resources + self.DiscoveryAnother.resources # compare resource lists modulo ordering self.assertEqual(set(static_resources + discovery), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pipeline_discovery_discovered_only(self): self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], []) def test_per_pipeline_discovery_static_only(self): self._do_test_per_pipeline_discovery([], ['static_1', 'static_2']) def test_per_pipeline_discovery_discovered_augmented_by_static(self): self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pipeline_discovery_discovered_duplicated_static(self): self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], ['dup', 'static_1', 'dup']) def test_multiple_pipelines_different_static_resources(self): # assert that the individual lists of static and discovered resources # for each pipeline with a common interval are passed to individual # pollsters matching each pipeline self.pipeline_cfg['sources'][0]['resources'] = ['test://'] self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] self.pipeline_cfg['sources'].append({ 'name': 'another_pipeline', 'interval': 60, 'meters': ['test'], 'resources': ['another://'], 'discovery': ['testdiscoveryanother'], 'sinks': ['test_sink_new'] }) self.mgr.discovery_manager = self.create_discovery_manager() self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual([None], self.Discovery.params) self.assertEqual([None], self.DiscoveryAnother.params) self.assertEqual(2, len(self.Pollster.samples)) samples = self.Pollster.samples test_resources = ['test://', 'discovered_1', 'discovered_2'] another_resources = ['another://', 'discovered_3', 'discovered_4'] if samples[0][1] == test_resources: self.assertEqual(another_resources, samples[1][1]) elif samples[0][1] == another_resources: self.assertEqual(test_resources, samples[1][1]) else: self.fail('unexpected sample resources %s' % samples) def test_multiple_sources_different_discoverers(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery'], 'sinks': ['test_sink_1']}, {'name': 'test_source_2', 'interval': 60, 'meters': ['testanother'], 
'discovery': ['testdiscoveryanother'], 'sinks': ['test_sink_2']}] sinks = [{'name': 'test_sink_1', 'transformers': [], 'publishers': ['test://']}, {'name': 'test_sink_2', 'transformers': [], 'publishers': ['test://']}] self.pipeline_cfg = {'sources': sources, 'sinks': sinks} self.mgr.discovery_manager = self.create_discovery_manager() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) self.assertEqual(1, len(self.PollsterAnother.samples)) self.assertEqual(['discovered_3', 'discovered_4'], self.PollsterAnother.resources) def test_multiple_sinks_same_discoverer(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery'], 'sinks': ['test_sink_1', 'test_sink_2']}] sinks = [{'name': 'test_sink_1', 'transformers': [], 'publishers': ['test://']}, {'name': 'test_sink_2', 'transformers': [], 'publishers': ['test://']}] self.pipeline_cfg = {'sources': sources, 'sinks': sinks} self.mgr.discovery_manager = self.create_discovery_manager() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) def test_discovery_partitioning(self): self.mgr.discovery_manager = self.create_discovery_manager() p_coord = self.mgr.partition_coordinator self.pipeline_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.pipeline_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), d.obj.resources) for d in self.mgr.discovery_manager if hasattr(d.obj, 'resources')] self.assertEqual(len(expected), len(p_coord.extract_my_subset.call_args_list)) for c in expected: self.assertIn(c, p_coord.extract_my_subset.call_args_list) def test_static_resources_partitioning(self): p_coord = self.mgr.partition_coordinator static_resources = ['static_1', 'static_2'] static_resources2 = ['static_3', 'static_4'] self.pipeline_cfg['sources'][0]['resources'] = static_resources self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline2', 'interval': 60, 'meters': ['test', 'test2'], 'resources': static_resources2, 'sinks': ['test_sink'] }) # have one pipeline without static resources defined self.pipeline_cfg['sources'].append({ 'name': 'test_pipeline3', 'interval': 60, 'meters': ['test', 'test2'], 'resources': [], 'sinks': ['test_sink'] }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) # Only two groups need to be created, one for each pipeline, # even though counter test is used twice expected = [mock.call(self.mgr.construct_group_id( utils.hash_of_set(resources)), resources) for resources in [static_resources, static_resources2]] self.assertEqual(len(expected), len(p_coord.extract_my_subset.call_args_list)) for c in expected: self.assertIn(c, p_coord.extract_my_subset.call_args_list) 
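    # The partitioning assertions above rely on how group ids are derived:
    # each distinct static resource list is hashed into a stable key with
    # utils.hash_of_set and wrapped by construct_group_id, so pipelines
    # sharing the same resource list also share one coordination group.
    # Roughly (a sketch of the calls the tests mock, not the exact
    # implementation):
    #
    #     group_id = mgr.construct_group_id(utils.hash_of_set(resources))
    #     mgr.partition_coordinator.extract_my_subset(group_id, resources)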
    @mock.patch('ceilometer.agent.manager.LOG')
    def test_polling_and_notify_with_resources(self, LOG):
        self.setup_polling()
        polling_task = list(self.mgr.setup_polling_tasks().values())[0]
        polling_task.poll_and_notify()
        LOG.info.assert_called_with(
            'Polling pollster %(poll)s in the context of %(src)s',
            {'poll': 'test', 'src': 'test_pipeline'})

    @mock.patch('ceilometer.agent.manager.LOG')
    def test_skip_polling_and_notify_with_no_resources(self, LOG):
        self.pipeline_cfg['sources'][0]['resources'] = []
        self.setup_polling()
        polling_task = list(self.mgr.setup_polling_tasks().values())[0]
        pollster = list(polling_task.pollster_matches['test_pipeline'])[0]
        polling_task.poll_and_notify()
        LOG.info.assert_called_with(
            'Skip pollster %(name)s, no %(p_context)sresources found this '
            'cycle', {'name': pollster.name, 'p_context': ''})

    @mock.patch('ceilometer.agent.manager.LOG')
    def test_skip_polling_polled_resources(self, LOG):
        self.pipeline_cfg['sources'].append({
            'name': 'test_pipeline_1',
            'interval': 60,
            'meters': ['test'],
            'resources': ['test://'],
            'sinks': ['test_sink']
        })
        self.setup_polling()
        polling_task = list(self.mgr.setup_polling_tasks().values())[0]
        polling_task.poll_and_notify()
        LOG.info.assert_called_with(
            'Skip pollster %(name)s, no %(p_context)sresources found this '
            'cycle', {'name': 'test', 'p_context': 'new '})

ceilometer-6.0.0/ceilometer/tests/unit/agent/test_manager.py
#
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer agent manager"""
import shutil

from keystoneclient import exceptions as ks_exceptions
import mock
from novaclient import client as novaclient
from oslo_config import fixture as fixture_config
from oslo_service import service as os_service
from oslo_utils import fileutils
from oslotest import base
from oslotest import mockpatch
import requests
import six
from stevedore import extension
import yaml

from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.hardware import discovery
from ceilometer import pipeline
from ceilometer.tests.unit.agent import agentbase


class PollingException(Exception):
    pass


class TestPollsterBuilder(agentbase.TestPollster):
    @classmethod
    def build_pollsters(cls):
        return [('builder1', cls()), ('builder2', cls())]


@mock.patch('ceilometer.compute.pollsters.'
'BaseComputePollster.setup_environment', mock.Mock(return_value=None)) class TestManager(base.BaseTestCase): def setUp(self): super(TestManager, self).setUp() self.conf = self.useFixture(fixture_config.Config()).conf self.conf(args=[]) @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) def test_load_plugins(self): mgr = manager.AgentManager() self.assertIsNotNone(list(mgr.extensions)) def test_load_plugins_pollster_list(self): mgr = manager.AgentManager(pollster_list=['disk.*']) # currently we do have 26 disk-related pollsters self.assertEqual(26, len(list(mgr.extensions))) def test_load_plugins_no_intersection(self): # Let's test nothing will be polled if namespace and pollsters # list have no intersection. mgr = manager.AgentManager(namespaces=['compute'], pollster_list=['storage.*']) self.assertEqual(0, len(list(mgr.extensions))) # Test plugin load behavior based on Node Manager pollsters. # pollster_list is just a filter, so sensor pollsters under 'ipmi' # namespace would be also instanced. Still need mock __init__ for it. @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_normal_plugins(self): mgr = manager.AgentManager(namespaces=['ipmi'], pollster_list=['hardware.ipmi.node.*']) # 8 pollsters for Node Manager self.assertEqual(8, len(mgr.extensions)) # Skip loading pollster upon ExtensionLoadError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=plugin_base.ExtensionLoadError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.agent.manager.LOG') def test_load_failed_plugins(self, LOG): # Here we additionally check that namespaces will be converted to the # list if param was not set as a list. 
mgr = manager.AgentManager(namespaces='ipmi', pollster_list=['hardware.ipmi.node.*']) # 0 pollsters self.assertEqual(0, len(mgr.extensions)) err_msg = 'Skip loading extension for hardware.ipmi.node.%s' pollster_names = [ 'power', 'temperature', 'outlet_temperature', 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] calls = [mock.call(err_msg % n) for n in pollster_names] LOG.exception.assert_has_calls(calls=calls, any_order=True) # Skip loading pollster upon ImportError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=ImportError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_import_error_in_plugin(self): mgr = manager.AgentManager(namespaces=['ipmi'], pollster_list=['hardware.ipmi.node.*']) # 0 pollsters self.assertEqual(0, len(mgr.extensions)) # Exceptions other than ExtensionLoadError are propagated @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=PollingException)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_exceptional_plugins(self): self.assertRaises(PollingException, manager.AgentManager, ['ipmi'], ['hardware.ipmi.node.*']) def test_load_plugins_pollster_list_forbidden(self): manager.cfg.CONF.set_override('backend_url', 'http://', group='coordination') self.assertRaises(manager.PollsterListForbidden, manager.AgentManager, pollster_list=['disk.*']) manager.cfg.CONF.reset() def test_builder(self): @staticmethod def fake_get_ext_mgr(namespace): if 'builder' in namespace: return extension.ExtensionManager.make_test_instance( [ extension.Extension('builder', None, TestPollsterBuilder, None), ] ) else: return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, agentbase.TestPollster()), ] ) with mock.patch.object(manager.AgentManager, '_get_ext_mgr', new=fake_get_ext_mgr): mgr = manager.AgentManager(namespaces=['central']) self.assertEqual(3, len(mgr.extensions)) for ext in mgr.extensions: self.assertIn(ext.name, ['builder1', 'builder2', 'test']) self.assertIsInstance(ext.obj, agentbase.TestPollster) class TestPollsterKeystone(agentbase.TestPollster): def get_samples(self, manager, cache, resources): # Just try to use keystone, that will raise an exception manager.keystone.projects.list() class TestPollsterPollingException(agentbase.TestPollster): polling_failures = 0 def get_samples(self, manager, cache, resources): func = super(TestPollsterPollingException, self).get_samples sample = func(manager=manager, cache=cache, resources=resources) # Raise polling exception after 2 times self.polling_failures += 1 if self.polling_failures > 2: raise plugin_base.PollsterPermanentError(resources) return sample class TestRunTasks(agentbase.BaseAgentManagerTestCase): class PollsterKeystone(TestPollsterKeystone): samples = [] resources = [] test_data = agentbase.TestSample( name='testkeystone', type=agentbase.default_test_data.type, unit=agentbase.default_test_data.unit, volume=agentbase.default_test_data.volume, user_id=agentbase.default_test_data.user_id, project_id=agentbase.default_test_data.project_id, resource_id=agentbase.default_test_data.resource_id, timestamp=agentbase.default_test_data.timestamp, resource_metadata=agentbase.default_test_data.resource_metadata) class PollsterPollingException(TestPollsterPollingException): samples = [] resources = [] test_data = agentbase.TestSample( name='testpollingexception', type=agentbase.default_test_data.type, 
unit=agentbase.default_test_data.unit, volume=agentbase.default_test_data.volume, user_id=agentbase.default_test_data.user_id, project_id=agentbase.default_test_data.project_id, resource_id=agentbase.default_test_data.resource_id, timestamp=agentbase.default_test_data.timestamp, resource_metadata=agentbase.default_test_data.resource_metadata) @staticmethod @mock.patch('ceilometer.compute.pollsters.' 'BaseComputePollster.setup_environment', mock.Mock(return_value=None)) def create_manager(): return manager.AgentManager() @staticmethod def setup_pipeline_file(pipeline): if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def fake_notifier_sample(self, ctxt, event_type, payload): for m in payload['samples']: del m['message_signature'] self.notified_samples.append(m) def setUp(self): self.notified_samples = [] self.notifier = mock.Mock() self.notifier.sample.side_effect = self.fake_notifier_sample self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', return_value=self.notifier)) self.source_resources = True super(TestRunTasks, self).setUp() self.useFixture(mockpatch.Patch( 'keystoneclient.v2_0.client.Client', return_value=mock.Mock())) def tearDown(self): self.PollsterKeystone.samples = [] self.PollsterKeystone.resources = [] self.PollsterPollingException.samples = [] self.PollsterPollingException.resources = [] super(TestRunTasks, self).tearDown() def create_extension_list(self): exts = super(TestRunTasks, self).create_extension_list() exts.extend([extension.Extension('testkeystone', None, None, self.PollsterKeystone(), ), extension.Extension('testpollingexception', None, None, self.PollsterPollingException(), )]) return exts def test_get_sample_resources(self): polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertTrue(self.Pollster.resources) def test_when_keystone_fail(self): """Test for bug 1316532.""" self.useFixture(mockpatch.Patch( 'keystoneclient.v2_0.client.Client', side_effect=ks_exceptions.ClientException)) self.pipeline_cfg = { 'sources': [{ 'name': "test_keystone", 'interval': 10, 'meters': ['testkeystone'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertFalse(self.PollsterKeystone.samples) self.assertFalse(self.notified_samples) @mock.patch('ceilometer.agent.manager.LOG') @mock.patch('ceilometer.nova_client.LOG') def test_hardware_discover_fail_minimize_logs(self, novalog, baselog): self.useFixture(mockpatch.PatchObject( novaclient.HTTPClient, 'authenticate', side_effect=requests.ConnectionError)) class PollsterHardware(agentbase.TestPollster): discovery = 'tripleo_overcloud_nodes' class PollsterHardwareAnother(agentbase.TestPollster): discovery = 'tripleo_overcloud_nodes' self.mgr.extensions.extend([ extension.Extension('testhardware', None, None, PollsterHardware(), ), extension.Extension('testhardware2', None, None, PollsterHardwareAnother(), ) ]) ext = extension.Extension('tripleo_overcloud_nodes', None, None, discovery.NodesDiscoveryTripleO()) self.mgr.discovery_manager = (extension.ExtensionManager .make_test_instance([ext])) self.pipeline_cfg = { 'sources': [{ 'name': "test_hardware", 
'interval': 10, 'meters': ['testhardware', 'testhardware2'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertEqual(1, novalog.exception.call_count) self.assertFalse(baselog.exception.called) @mock.patch('ceilometer.agent.manager.LOG') def test_polling_exception(self, LOG): source_name = 'test_pollingexception' self.pipeline_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] # 2 samples after 4 pollings, as pollster got disabled upon exception for x in range(0, 4): self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(2, len(samples)) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s for ' 'polling source %(source)s anymore!') % ({'name': pollster.name, 'source': source_name})) def test_batching_polled_samples_false(self): self.CONF.set_override('batch_polled_samples', False) self._batching_samples(4, 4) def test_batching_polled_samples_true(self): self.CONF.set_override('batch_polled_samples', True) self._batching_samples(4, 1) def test_batching_polled_samples_default(self): self._batching_samples(4, 1) def _batching_samples(self, expected_samples, call_count): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testbatch'], 'resources': ['alpha', 'beta', 'gamma', 'delta'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) pipeline_cfg_file = self.setup_pipeline_file(pipeline) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) self.mgr.start() # Manually executes callbacks for timer in self.mgr.pollster_timers: timer.f(*timer.args, **timer.kw) samples = self.notified_samples self.assertEqual(expected_samples, len(samples)) self.assertEqual(call_count, self.notifier.sample.call_count) def test_start_with_reloadable_pipeline(self): self.CONF.set_override('heartbeat', 1.0, group='coordination') self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 2) pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['test'], 'resources': ['test://'] if self.source_resources else [], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) pipeline_cfg_file = self.setup_pipeline_file(pipeline) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) self.mgr.start() # we only got the old name of meters for sample in self.notified_samples: self.assertEqual('test', sample['counter_name']) self.assertEqual(1, sample['counter_volume']) self.assertEqual('test_run_tasks', sample['resource_id']) # Modify the collection targets pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testanother'], 'resources': ['test://'] if self.source_resources else 
[], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] }) updated_pipeline_cfg_file = self.setup_pipeline_file(pipeline) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) # Flush notified samples to test only new, nothing latent on # fake message bus. self.notified_samples = [] # we only got the new name of meters for sample in self.notified_samples: self.assertEqual('testanother', sample['counter_name']) self.assertEqual(1, sample['counter_volume']) self.assertEqual('test_run_tasks', sample['resource_id']) ceilometer-6.0.0/ceilometer/tests/unit/agent/test_discovery.py0000664000567000056710000001011612701406223025771 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/central/manager.py """ import mock from oslo_config import fixture as fixture_config from oslotest import base from ceilometer.agent.discovery import endpoint from ceilometer.agent.discovery import localnode from ceilometer.hardware import discovery as hardware class TestEndpointDiscovery(base.BaseTestCase): def setUp(self): super(TestEndpointDiscovery, self).setUp() self.discovery = endpoint.EndpointDiscovery() self.manager = mock.MagicMock() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override('interface', 'test-endpoint-type', group='service_credentials') self.CONF.set_override('region_name', 'test-region-name', group='service_credentials') self.catalog = (self.manager.keystone.session.auth.get_access. 
                        return_value.service_catalog)

    def test_keystone_called(self):
        self.discovery.discover(self.manager, param='test-service-type')
        expected = [mock.call(service_type='test-service-type',
                              interface='test-endpoint-type',
                              region_name='test-region-name')]
        self.assertEqual(expected, self.catalog.get_urls.call_args_list)

    def test_keystone_called_no_service_type(self):
        self.discovery.discover(self.manager)
        expected = [mock.call(service_type=None,
                              interface='test-endpoint-type',
                              region_name='test-region-name')]
        self.assertEqual(expected, self.catalog.get_urls.call_args_list)

    def test_keystone_called_no_endpoints(self):
        self.catalog.get_urls.return_value = []
        self.assertEqual([], self.discovery.discover(self.manager))


class TestLocalnodeDiscovery(base.BaseTestCase):
    def setUp(self):
        super(TestLocalnodeDiscovery, self).setUp()
        self.discovery = localnode.LocalNodeDiscovery()
        self.manager = mock.MagicMock()

    def test_localnode_discovery(self):
        self.assertEqual(['local_host'],
                         self.discovery.discover(self.manager))


class TestHardwareDiscovery(base.BaseTestCase):
    class MockInstance(object):
        addresses = {'ctlplane': [
            {'addr': '0.0.0.0',
             'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'}
        ]}
        id = 'resource_id'
        image = {'id': 'image_id'}
        flavor = {'id': 'flavor_id'}

    expected = {
        'resource_id': 'resource_id',
        'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0',
        'mac_addr': '01-23-45-67-89-ab',
        'image_id': 'image_id',
        'flavor_id': 'flavor_id',
    }

    def setUp(self):
        super(TestHardwareDiscovery, self).setUp()
        self.discovery = hardware.NodesDiscoveryTripleO()
        self.discovery.nova_cli = mock.MagicMock()
        self.manager = mock.MagicMock()

    def test_hardware_discovery(self):
        self.discovery.nova_cli.instance_get_all.return_value = [
            self.MockInstance()]
        resources = self.discovery.discover(self.manager)
        self.assertEqual(1, len(resources))
        self.assertEqual(self.expected, resources[0])

    def test_hardware_discovery_without_flavor(self):
        instance = self.MockInstance()
        instance.flavor = {}
        self.discovery.nova_cli.instance_get_all.return_value = [instance]
        resources = self.discovery.discover(self.manager)
        self.assertEqual(0, len(resources))

ceilometer-6.0.0/ceilometer/tests/unit/agent/test_plugin.py
#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as fixture_config
from oslotest import base

from ceilometer.agent import plugin_base


class NotificationBaseTestCase(base.BaseTestCase):
    def setUp(self):
        super(NotificationBaseTestCase, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf

    class FakePlugin(plugin_base.NotificationBase):
        event_types = ['compute.*']

        def process_notification(self, message):
            pass

        def get_targets(self, conf):
            pass

    def test_plugin_info(self):
        plugin = self.FakePlugin(mock.Mock())
        plugin.to_samples_and_publish = mock.Mock()
        message = {
            'ctxt': {'user_id': 'fake_user_id',
                     'project_id': 'fake_project_id'},
            'publisher_id': 'fake.publisher_id',
            'event_type': 'fake.event',
            'payload': {'foo': 'bar'},
            'metadata': {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8',
                         'timestamp': '2015-06-1909:19:35.786893'}
        }
        plugin.info([message])
        notification = {
            'priority': 'info',
            'event_type': 'fake.event',
            'timestamp': '2015-06-1909:19:35.786893',
            '_context_user_id': 'fake_user_id',
            '_context_project_id': 'fake_project_id',
            'publisher_id': 'fake.publisher_id',
            'payload': {'foo': 'bar'},
            'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8'
        }
        plugin.to_samples_and_publish.assert_called_with(mock.ANY,
                                                         notification)

ceilometer-6.0.0/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
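# NOTE: The fixtures below mirror the nested "status tree" the neutron
# LBaaS v2 API returns: loadbalancer -> listeners -> pools -> members, each
# node carrying an operating_status.  A minimal sketch of the traversal the
# client helpers under test perform (illustrative only, not the exact
# ceilometer implementation):
#
#     def member_statuses(status_tree):
#         statuses = {}
#         lb = status_tree['statuses']['loadbalancer']
#         for listener in lb['listeners']:
#             for pool in listener['pools']:
#                 for member in pool['members']:
#                     statuses[member['id']] = member['operating_status']
#         return statuses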
import mock from neutronclient.v2_0 import client from oslotest import base from ceilometer import neutron_client class TestNeutronClientLBaaSV2(base.BaseTestCase): def setUp(self): super(TestNeutronClientLBaaSV2, self).setUp() self.nc = neutron_client.Client() @staticmethod def fake_list_lbaas_pools(): return { 'pools': [{ 'lb_algorithm': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': 'simple pool', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'healthmonitor_id': None, 'listeners': [{ 'id': "35cb8516-1173-4035-8dae-0dae3453f37f" } ], 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858'} ], 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'name': 'pool1' }] } @staticmethod def fake_list_lbaas_members(): return { 'members': [{ 'weight': 1, 'admin_state_up': True, 'subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'address': '10.0.0.8', 'protocol_port': 80, 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858' }] } @staticmethod def fake_list_lbaas_healthmonitors(): return { 'healthmonitors': [{ 'admin_state_up': True, 'tenant_id': '6f3584d5754048a18e30685362b88411', 'delay': 1, 'expected_codes': '200,201,202', 'max_retries': 5, 'http_method': 'GET', 'timeout': 1, 'pools': [{ 'id': '74aa2010-a59f-4d35-a436-60a6da882819' }], 'url_path': '/index.html', 'type': 'HTTP', 'id': '0a9ac99d-0a09-4b18-8499-a0796850279a' }] } @staticmethod def fake_show_listener(): return { 'listener': { 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': '' } } @staticmethod def fake_retrieve_loadbalancer_status(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_retrieve_loadbalancer_status_complex(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }, { 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf969', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }, { 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfa7a', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4629', 'provisioning_status': 'ACTIVE' } }] }, { 'id': 
'35cb8516-1173-4035-8dae-0dae3453f48e', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce7g7', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfb8b', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b473a', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_list_lbaas_listeners(): return { 'listeners': [{ 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'listener_one' }]} @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_pools_v2(self, mock_status, mock_show, mock_list): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list.return_value = self.fake_list_lbaas_pools() pools = self.nc.list_pools_v2() self.assertEqual(1, len(pools)) for pool in pools: self.assertEqual('ONLINE', pool['status']) self.assertEqual('ROUND_ROBIN', pool['lb_method']) @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'list_lbaas_members') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_members_v2(self, mock_status, mock_show, mock_list_members, mock_list_pools): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list_pools.return_value = self.fake_list_lbaas_pools() mock_list_members.return_value = self.fake_list_lbaas_members() members = self.nc.list_members_v2() self.assertEqual(1, len(members)) for member in members: self.assertEqual('ONLINE', member['status']) self.assertEqual('4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', member['pool_id']) @mock.patch.object(client.Client, 'list_lbaas_healthmonitors') def test_list_health_monitors_v2(self, mock_list_healthmonitors): mock_list_healthmonitors.return_value = ( self.fake_list_lbaas_healthmonitors()) healthmonitors = self.nc.list_health_monitors_v2() self.assertEqual(1, len(healthmonitors)) for healthmonitor in healthmonitors: self.assertEqual(5, healthmonitor['max_retries']) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_member_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' listener_id = '35cb8516-1173-4035-8dae-0dae3453f37f' pool_id = '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5' parent_id = [listener_id, pool_id] result_status = self.nc._get_member_status(loadbalancer_id, parent_id) expected_keys = ['fcf23bde-8cf9-4616-883f-208cebcbf858', 'fcf23bde-8cf9-4616-883f-208cebcbf969'] excepted_status = { 'fcf23bde-8cf9-4616-883f-208cebcbf858': 'ONLINE', 'fcf23bde-8cf9-4616-883f-208cebcbf969': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def 
test_get_pool_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' parent_id = '35cb8516-1173-4035-8dae-0dae3453f37f' result_status = self.nc._get_pool_status(loadbalancer_id, parent_id) expected_keys = ['4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6'] excepted_status = { '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5': 'ONLINE', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_listener_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' result_status = self.nc._get_listener_status(loadbalancer_id) expected_keys = ['35cb8516-1173-4035-8dae-0dae3453f37f', '35cb8516-1173-4035-8dae-0dae3453f48e'] excepted_status = { '35cb8516-1173-4035-8dae-0dae3453f37f': 'ONLINE', '35cb8516-1173-4035-8dae-0dae3453f48e': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(client.Client, 'list_listeners') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_listener(self, mock_status, mock_list_listeners): mock_list_listeners.return_value = ( self.fake_list_lbaas_listeners()) mock_status.return_value = ( self.fake_retrieve_loadbalancer_status()) listeners = self.nc.list_listener() expected_key = '35cb8516-1173-4035-8dae-0dae3453f37f' expected_status = 'ONLINE' self.assertEqual(1, len(listeners)) self.assertEqual(expected_key, listeners[0]['id']) self.assertEqual(expected_status, listeners[0]['operating_status']) ceilometer-6.0.0/ceilometer/tests/unit/test_middleware.py0000664000567000056710000001030412701406223025000 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
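# NOTE: The fixtures below model 'http.request' / 'http.response'
# notification events whose payload carries selected WSGI environ headers.
# The middleware plugins under test map HTTP_X_USER_ID, HTTP_X_PROJECT_ID
# and HTTP_X_SERVICE_NAME onto the sample's user, project and resource ids,
# emitting a volume of 1 per request or response seen.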
import mock from oslo_config import fixture as fixture_config from ceilometer import middleware from ceilometer.tests import base HTTP_REQUEST = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'http.request', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } HTTP_RESPONSE = { u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'_context_is_admin': True, u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', u'_context_quota_class': None, u'_context_read_deleted': u'no', u'_context_remote_address': u'10.0.2.15', u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'_context_roles': [u'admin'], u'_context_timestamp': u'2012-05-08T20:23:41.425105', u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'event_type': u'http.response', u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}, u'response': {'status': '200 OK'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'timestamp': u'2012-05-08 20:23:48.028195', } class TestNotifications(base.BaseTestCase): def setUp(self): super(TestNotifications, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) def test_process_request_notification(self): sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( HTTP_REQUEST ))[0] self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_process_response_notification(self): sample = list(middleware.HTTPResponse( mock.Mock()).process_notification(HTTP_RESPONSE))[0] self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_targets(self): targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) self.assertEqual(4, len(targets)) ceilometer-6.0.0/ceilometer/tests/unit/image/0000775000567000056710000000000012701406364022344 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/image/__init__.py0000664000567000056710000000000012701406223024435 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/image/test_glance.py0000664000567000056710000002022312701406223025177 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as fixture_config from oslo_context import context from oslotest import base from oslotest import mockpatch from ceilometer.agent import manager from ceilometer.image import glance IMAGE_LIST = [ type('Image', (object,), {u'status': u'queued', u'name': "some name", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:29:46', u'disk_format': None, u'updated_at': u'2012-09-18T16:29:46', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', u'location': None, u'checksum': None, u'owner': u'4c8364fc20184ed7971b76602aa96184', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 2048}), type('Image', (object,), {u'status': u'active', u'name': "hello world", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:27:41', u'disk_format': None, u'updated_at': u'2012-09-18T16:27:41', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'22be9f90-864d-494c-aa74-8035fd535989', u'location': None, u'checksum': None, u'owner': u'9e4f98287a0246daa42eaf4025db99d4', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 0}), type('Image', (object,), {u'status': u'queued', u'name': None, u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:23:27', u'disk_format': "raw", u'updated_at': u'2012-09-18T16:23:27', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', u'location': None, u'checksum': None, u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 1024}), type('Image', (object,), {u'status': u'queued', u'name': "some name", u'deleted': False, u'container_format': None, u'created_at': u'2012-09-18T16:29:46', u'disk_format': None, u'updated_at': u'2012-09-18T16:29:46', u'properties': {}, u'min_disk': 0, u'protected': False, u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', u'location': None, u'checksum': None, u'owner': u'4c8364fc20184ed7971b76602aa96184', u'is_public': True, u'deleted_at': None, u'min_ram': 0, u'size': 2048}), ] ENDPOINT = 'end://point' class _BaseObject(object): pass class FakeGlanceClient(object): class images(object): pass class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() access = self._keystone.session.auth.get_access.return_value access.service_catalog.get_endpoints = mock.Mock( return_value={'image': mock.ANY}) class TestImagePollsterPageSize(base.BaseTestCase): @staticmethod def fake_get_glance_client(ksclient, endpoint): glanceclient = FakeGlanceClient() glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) return glanceclient @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestImagePollsterPageSize, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() self.useFixture(mockpatch.PatchObject( glance._Base, 
'get_glance_client', side_effect=self.fake_get_glance_client)) self.CONF = self.useFixture(fixture_config.Config()).conf def _do_test_iter_images(self, page_size=0, length=0): self.CONF.set_override("glance_page_size", page_size) images = list(glance.ImagePollster(). _iter_images(self.manager.keystone, {}, ENDPOINT)) kwargs = {} if page_size > 0: kwargs['page_size'] = page_size FakeGlanceClient.images.list.assert_called_with( filters={'is_public': None}, **kwargs) self.assertEqual(length, len(images)) def test_page_size(self): self._do_test_iter_images(100, 4) def test_page_size_default(self): self._do_test_iter_images(length=4) def test_page_size_negative_number(self): self._do_test_iter_images(-1, 4) class TestImagePollster(base.BaseTestCase): @staticmethod def fake_get_glance_client(ksclient, endpoint): glanceclient = _BaseObject() setattr(glanceclient, "images", _BaseObject()) setattr(glanceclient.images, "list", lambda *args, **kwargs: iter(IMAGE_LIST)) return glanceclient @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(TestImagePollster, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() self.useFixture(mockpatch.PatchObject( glance._Base, 'get_glance_client', side_effect=self.fake_get_glance_client)) def test_default_discovery(self): pollster = glance.ImagePollster() self.assertEqual('endpoint:image', pollster.default_discovery) def test_iter_images(self): # Tests whether the iter_images method returns a unique image # list when there is nothing in the cache images = list(glance.ImagePollster(). _iter_images(self.manager.keystone, {}, ENDPOINT)) self.assertEqual(len(set(image.id for image in images)), len(images)) def test_iter_images_cached(self): # Tests whether the iter_images method returns the values from # the cache cache = {'%s-images' % ENDPOINT: []} images = list(glance.ImagePollster(). _iter_images(self.manager.keystone, cache, ENDPOINT)) self.assertEqual([], images) def test_image(self): samples = list(glance.ImagePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(4, len(samples)) for sample in samples: self.assertEqual(1, sample.volume) def test_image_size(self): samples = list(glance.ImageSizePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(4, len(samples)) for image in IMAGE_LIST: self.assertTrue( any(map(lambda sample: sample.volume == image.size, samples))) def test_image_get_sample_names(self): samples = list(glance.ImagePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(set(['image']), set([s.name for s in samples])) def test_image_size_get_sample_names(self): samples = list(glance.ImageSizePollster().get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(set(['image.size']), set([s.name for s in samples])) ceilometer-6.0.0/ceilometer/tests/unit/telemetry/0000775000567000056710000000000012701406364023274 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/telemetry/__init__.py0000664000567000056710000000000012701406223025365 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/telemetry/test_notifications.py0000664000567000056710000000760212701406223027555 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from ceilometer.telemetry import notifications NOTIFICATION = { u'_context_domain': None, u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'event_type': u'telemetry.api', 'timestamp': u'2015-06-1909: 19: 35.786893', u'_context_auth_token': None, u'_context_read_only': False, 'payload': {'samples': [{'counter_name': u'instance100', u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'instance', u'timestamp': u'2015-06-19T09: 19: 35.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'counter_unit': u'instance', u'counter_volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905', u'counter_type': u'gauge'}, {u'counter_name': u'instance100', u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'instance', u'timestamp': u'2015-06-19T09: 19: 35.785330', u'message_signature': u'fake_signature12', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'counter_unit': u'instance', u'counter_volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905', u'counter_type': u'gauge'}]}, u'_context_resource_uuid': None, u'_context_user_identity': u'fake_user_identity---', u'_context_show_deleted': False, u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', 'priority': 'info', u'_context_is_admin': True, u'_context_project_domain': None, u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', u'_context_user_domain': None, 'publisher_id': u'ceilometer.api', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' } class TelemetryIpcTestCase(base.BaseTestCase): def test_process_notification(self): sample_creation = notifications.TelemetryIpc(None) samples = list(sample_creation.process_notification(NOTIFICATION)) self.assertEqual(2, len(samples)) payload = NOTIFICATION["payload"]['samples'] for index, sample in enumerate(samples): self.assertEqual(payload[index]["user_id"], sample.user_id) self.assertEqual(payload[index]["counter_name"], sample.name) self.assertEqual(payload[index]["resource_id"], sample.resource_id) self.assertEqual(payload[index]["timestamp"], sample.timestamp) self.assertEqual(payload[index]["resource_metadata"], sample.resource_metadata) self.assertEqual(payload[index]["counter_volume"], sample.volume) self.assertEqual(payload[index]["source"], sample.source) self.assertEqual(payload[index]["counter_type"], sample.type) self.assertEqual(payload[index]["message_id"], sample.id) self.assertEqual(payload[index]["counter_unit"], sample.unit) ceilometer-6.0.0/ceilometer/tests/unit/transformer/0000775000567000056710000000000012701406364023624 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/transformer/__init__.py0000664000567000056710000000000012701406223025715 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/transformer/test_conversions.py0000664000567000056710000001077112701406224027606 0ustar 
jenkinsjenkins00000000000000#
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy

from oslo_context import context
from oslo_utils import timeutils
from oslotest import base

from ceilometer import sample
from ceilometer.transformer import conversions


class AggregatorTransformerTestCase(base.BaseTestCase):
    SAMPLE = sample.Sample(
        name='cpu',
        type=sample.TYPE_CUMULATIVE,
        unit='ns',
        volume='1234567',
        user_id='56c5692032f34041900342503fecab30',
        project_id='ac9494df2d9d4e709bac378cceabaf23',
        resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4',
        timestamp="2015-10-29 14:12:15.485877+00:00",
        resource_metadata={}
    )

    def setUp(self):
        super(AggregatorTransformerTestCase, self).setUp()
        self._sample_offset = 0

    def test_init_input_validation(self):
        aggregator = conversions.AggregatorTransformer("2", "15", None,
                                                       None, None)
        self.assertEqual(2, aggregator.size)
        self.assertEqual(15, aggregator.retention_time)

    def test_init_no_size_or_retention_time(self):
        aggregator = conversions.AggregatorTransformer()
        self.assertEqual(1, aggregator.size)
        self.assertIsNone(aggregator.retention_time)

    def test_init_size_zero(self):
        aggregator = conversions.AggregatorTransformer(size="0")
        self.assertEqual(1, aggregator.size)
        self.assertIsNone(aggregator.retention_time)

    def test_init_input_validation_size_invalid(self):
        self.assertRaises(ValueError, conversions.AggregatorTransformer,
                          "abc", "15", None, None, None)

    def test_init_input_validation_retention_time_invalid(self):
        self.assertRaises(ValueError, conversions.AggregatorTransformer,
                          "2", "abc", None, None, None)

    def test_init_no_timestamp(self):
        aggregator = conversions.AggregatorTransformer("1", "1", None,
                                                       None, None)
        self.assertEqual("first", aggregator.timestamp)

    def test_init_timestamp_none(self):
        aggregator = conversions.AggregatorTransformer("1", "1", None,
                                                       None, None, None)
        self.assertEqual("first", aggregator.timestamp)

    def test_init_timestamp_first(self):
        aggregator = conversions.AggregatorTransformer("1", "1", None,
                                                       None, None, "first")
        self.assertEqual("first", aggregator.timestamp)

    def test_init_timestamp_last(self):
        aggregator = conversions.AggregatorTransformer("1", "1", None,
                                                       None, None, "last")
        self.assertEqual("last", aggregator.timestamp)

    def test_init_timestamp_invalid(self):
        aggregator = conversions.AggregatorTransformer("1", "1", None, None,
                                                       None,
                                                       "invalid_option")
        self.assertEqual("first", aggregator.timestamp)

    def test_size_unbounded(self):
        aggregator = conversions.AggregatorTransformer(size="0",
                                                       retention_time="300")
        self._insert_sample_data(aggregator)
        samples = aggregator.flush(context.get_admin_context())
        self.assertEqual([], samples)

    def test_size_bounded(self):
        aggregator = conversions.AggregatorTransformer(size="100")
        self._insert_sample_data(aggregator)
        samples = aggregator.flush(context.get_admin_context())
        self.assertEqual(100, len(samples))

    def _insert_sample_data(self, aggregator):
        for _ in range(100):
            sample = copy.copy(self.SAMPLE)
            sample.resource_id = sample.resource_id + str(self._sample_offset)
            sample.timestamp =
timeutils.isotime() aggregator.handle_sample(context.get_admin_context(), sample) self._sample_offset += 1 ceilometer-6.0.0/ceilometer/tests/unit/hardware/0000775000567000056710000000000012701406364023057 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/pollsters/0000775000567000056710000000000012701406364025106 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/pollsters/test_util.py0000664000567000056710000000465312701406223027476 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ceilometer.hardware.pollsters import util from ceilometer import sample from ceilometer.tests import base as test_base class TestPollsterUtils(test_base.BaseTestCase): def setUp(self): super(TestPollsterUtils, self).setUp() self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") def test_make_sample(self): s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, res_metadata={ 'metakey': 'metaval', }) self.assertEqual('127.0.0.1', s.resource_id) self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) self.assertIn('metakey', s.resource_metadata.keys()) def test_make_sample_extra(self): extra = { 'project_id': 'project', 'resource_id': 'resource' } s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, extra=extra) self.assertIsNone(s.user_id) self.assertEqual('project', s.project_id) self.assertEqual('resource', s.resource_id) self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', 'project_id': 'project', 'resource_id': 'resource'}, s.resource_metadata) ceilometer-6.0.0/ceilometer/tests/unit/hardware/pollsters/__init__.py0000664000567000056710000000000012701406223027177 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/pollsters/test_generic.py0000664000567000056710000001626412701406224030137 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
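# The declarative hardware pollster exercised below builds meters from
# configuration instead of one Python class per meter.  As a rough sketch
# (field names mirror the fixtures in this module, not a canonical
# example), one entry in the meter definitions YAML looks like:
#
#     metric:
#       - name: hardware.test1
#         type: gauge
#         unit: process
#         snmp_inspector:
#           matching_type: type_exact
#           oid: "1.3.6.1.4.1.2021.10.1.3.1"
#           type: "lambda x: float(str(x))"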
import mock
import six
import yaml

from oslo_config import fixture as fixture_config
from oslo_utils import fileutils
from oslotest import mockpatch

from ceilometer import declarative
from ceilometer.hardware.inspector import base as inspector_base
from ceilometer.hardware.pollsters import generic
from ceilometer import sample
from ceilometer.tests import base as test_base


class TestMeterDefinition(test_base.BaseTestCase):
    def test_config_definition(self):
        cfg = dict(name='test',
                   type='gauge',
                   unit='B',
                   snmp_inspector={})
        definition = generic.MeterDefinition(cfg)
        self.assertEqual('test', definition.name)
        self.assertEqual('gauge', definition.type)
        self.assertEqual('B', definition.unit)
        self.assertEqual({}, definition.snmp_inspector)

    def test_config_missing_field(self):
        cfg = dict(name='test', type='gauge')
        # assertRaises fails the test when no exception is raised; the
        # bare try/except it replaces passed silently in that case.
        e = self.assertRaises(generic.MeterDefinitionException,
                              generic.MeterDefinition, cfg)
        self.assertEqual("Missing field unit", e.message)

    def test_config_invalid_field(self):
        cfg = dict(name='test',
                   type='gauge',
                   unit='B',
                   invalid={})
        definition = generic.MeterDefinition(cfg)
        self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar'))

    def test_config_invalid_type_field(self):
        cfg = dict(name='test',
                   type='invalid',
                   unit='B',
                   snmp_inspector={})
        e = self.assertRaises(generic.MeterDefinitionException,
                              generic.MeterDefinition, cfg)
        self.assertEqual("Unrecognized type value invalid", e.message)

    @mock.patch('ceilometer.hardware.pollsters.generic.LOG')
    def test_bad_metric_skip(self, LOG):
        cfg = {'metric': [dict(name='test1',
                               type='gauge',
                               unit='B',
                               snmp_inspector={}),
                          dict(name='test_bad',
                               type='invalid',
                               unit='B',
                               snmp_inspector={}),
                          dict(name='test2',
                               type='gauge',
                               unit='B',
                               snmp_inspector={})]}
        data = generic.load_definition(cfg)
        self.assertEqual(2, len(data))
        LOG.error.assert_called_with(
            "Error loading meter definition : "
            "Unrecognized type value invalid")


class FakeInspector(inspector_base.Inspector):
    net_metadata = dict(name='test.teest',
                        mac='001122334455',
                        ip='10.0.0.2',
                        speed=1000)
    DATA = {
        'test': (0.99, {}, {}),
        'test2': (90, net_metadata, {}),
    }

    def inspect_generic(self, host, cache, extra_metadata=None, param=None):
        yield self.DATA[host.hostname]


class TestGenericPollsters(test_base.BaseTestCase):
    @staticmethod
    def faux_get_inspector(url, namespace=None):
        return FakeInspector()

    def setUp(self):
        super(TestGenericPollsters, self).setUp()
        self.conf = self.useFixture(fixture_config.Config()).conf
        self.resources = ["snmp://test", "snmp://test2"]
        self.useFixture(mockpatch.Patch(
            'ceilometer.hardware.inspector.get_inspector',
            self.faux_get_inspector))
        self.conf(args=[])
        self.pollster = generic.GenericHardwareDeclarativePollster()

    def __setup_meter_def_file(self, cfg):
        if six.PY3:
            cfg = cfg.encode('utf-8')
        meter_cfg_file = fileutils.write_to_tempfile(content=cfg,
                                                     prefix="snmp",
                                                     suffix="yaml")
        self.conf.set_override(
            'meter_definitions_file',
            meter_cfg_file, group='hardware')
        cfg = declarative.load_definitions(
            {}, self.conf.hardware.meter_definitions_file)
        return cfg

    def _check_get_samples(self, name, definition,
                           expected_value, expected_type,
                           expected_unit=None):
        self.pollster._update_meter_definition(definition)
        cache = {}
        samples = list(self.pollster.get_samples(None, cache,
                                                 self.resources))
        self.assertTrue(samples)
        self.assertIn(self.pollster.CACHE_KEY, cache)
        for resource in self.resources:
            self.assertIn(resource, cache[self.pollster.CACHE_KEY])

        self.assertEqual(set([name]),
                         set([s.name for s in samples]))
        match = [s for s in samples if s.name == name]
        self.assertEqual(expected_value,
match[0].volume) self.assertEqual(expected_type, match[0].type) if expected_unit: self.assertEqual(expected_unit, match[0].unit) def test_get_samples(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_def = generic.MeterDefinition(dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param)) self._check_get_samples('hardware.test1', meter_def, 0.99, sample.TYPE_GAUGE, expected_unit='process') def test_get_pollsters_extensions(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_cfg = yaml.dump( {'metric': [dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param), dict(type='gauge', name='hardware.test2.abc', unit='process', snmp_inspector=param)]}) self.__setup_meter_def_file(meter_cfg) pollster = generic.GenericHardwareDeclarativePollster # Clear cached mapping pollster.mapping = None exts = pollster.get_pollsters_extensions() self.assertEqual(2, len(exts)) self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc']) self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc']) ceilometer-6.0.0/ceilometer/tests/unit/hardware/__init__.py0000664000567000056710000000000012701406223025150 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/inspector/0000775000567000056710000000000012701406364025065 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/inspector/__init__.py0000664000567000056710000000000012701406223027156 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/hardware/inspector/test_inspector.py0000664000567000056710000000217512701406223030503 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ceilometer.hardware import inspector from ceilometer.tests import base class TestHardwareInspector(base.BaseTestCase): def test_get_inspector(self): url = netutils.urlsplit("snmp://") driver = inspector.get_inspector(url) self.assertTrue(driver) def test_get_inspector_illegal(self): url = netutils.urlsplit("illegal://") self.assertRaises(RuntimeError, inspector.get_inspector, url) ceilometer-6.0.0/ceilometer/tests/unit/hardware/inspector/test_snmp.py0000664000567000056710000001764212701406223027457 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp # # Authors: Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/hardware/inspector/snmp/inspector.py """ from oslo_utils import netutils from oslotest import mockpatch from ceilometer.hardware.inspector import snmp from ceilometer.tests import base as test_base ins = snmp.SNMPInspector class FakeObjectName(object): def __init__(self, name): self.name = name def __str__(self): return str(self.name) def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): varBinds = [(FakeObjectName(oid), int(oid.split('.')[-1])) for oid in oids] return (None, None, 0, varBinds) def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, *oids, **kwargs): varBindTable = [ [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] for oid in oids ] return (None, None, 0, varBindTable) class TestSNMPInspector(test_base.BaseTestCase): mapping = { 'test_exact': { 'matching_type': snmp.EXACT, 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) }, 'post_op': '_fake_post_op', }, 'test_prefix': { 'matching_type': snmp.PREFIX, 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) }, 'post_op': None, }, } def setUp(self): super(TestSNMPInspector, self).setUp() self.inspector = snmp.SNMPInspector() self.host = netutils.urlsplit("snmp://localhost") self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) def test_snmp_error(self): def get_list(func, *args, **kwargs): return list(func(*args, **kwargs)) def faux_parse(ret, is_bulk): return (True, 'forced error') self.useFixture(mockpatch.PatchObject( snmp, 'parse_snmp_return', new=faux_parse)) self.assertRaises(snmp.SNMPException, get_list, self.inspector.inspect_generic, host=self.host, cache={}, extra_metadata={}, param=self.mapping['test_exact']) @staticmethod def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): metadata.update(post_op_meta=4) extra.update(project_id=2) return value def test_inspect_generic_exact(self): self.inspector._fake_post_op = self._fake_post_op cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_exact'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) self.assertEqual(1, len(ret)) self.assertEqual(1, ret[0][0]) self.assertEqual(8, ret[0][1]['meta']) self.assertEqual(4, ret[0][1]['post_op_meta']) self.assertEqual(2, ret[0][2]['project_id']) def test_inspect_generic_prefix(self): cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_prefix'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) self.assertEqual(2, len(ret)) self.assertIn(ret[0][0], (1, 2)) self.assertEqual(ret[0][0], ret[0][1]['meta']) def test_post_op_net(self): self.useFixture(mockpatch.PatchObject( self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) cache = {} metadata = dict(name='lo', speed=0, mac='ba21e43302fe') extra = {} ret = self.inspector._post_op_net(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=".2") self.assertEqual(8, ret) 
self.assertIn('ip', metadata) self.assertIn("2", metadata['ip']) self.assertIn('resource_id', extra) self.assertEqual("localhost.lo", extra['resource_id']) def test_post_op_disk(self): cache = {} metadata = dict(device='/dev/sda1', path='/') extra = {} ret = self.inspector._post_op_disk(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=None) self.assertEqual(8, ret) self.assertIn('resource_id', extra) self.assertEqual("localhost./dev/sda1", extra['resource_id']) def test_prepare_params(self): param = {'post_op': '_post_op_disk', 'oid': '1.3.6.1.4.1.2021.9.1.6', 'type': 'int', 'matching_type': 'type_prefix', 'metadata': { 'device': {'oid': '1.3.6.1.4.1.2021.9.1.3', 'type': 'str'}, 'path': {'oid': '1.3.6.1.4.1.2021.9.1.2', 'type': "lambda x: str(x)"}}} processed = self.inspector.prepare_params(param) self.assertEqual('_post_op_disk', processed['post_op']) self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0]) self.assertEqual(int, processed['metric_oid'][1]) self.assertEqual(snmp.PREFIX, processed['matching_type']) self.assertEqual(2, len(processed['metadata'].keys())) self.assertEqual('1.3.6.1.4.1.2021.9.1.2', processed['metadata']['path'][0]) self.assertEqual("4", processed['metadata']['path'][1](4)) def test_pysnmp_ver43(self): # Test pysnmp version >=4.3 compatibility of ObjectIdentifier from distutils.version import StrictVersion import pysnmp has43 = StrictVersion(pysnmp.__version__) >= StrictVersion('4.3.0') oid = '1.3.6.4.1.2021.11.57.0' if has43: from pysnmp.entity import engine from pysnmp.smi import rfc1902 from pysnmp.smi import view snmp_engine = engine.SnmpEngine() mvc = view.MibViewController(snmp_engine.getMibBuilder()) name = rfc1902.ObjectIdentity(oid) name.resolveWithMib(mvc) else: from pysnmp.proto import rfc1902 name = rfc1902.ObjectName(oid) self.assertEqual(oid, str(name)) ceilometer-6.0.0/ceilometer/tests/unit/storage/0000775000567000056710000000000012701406364022726 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/storage/__init__.py0000664000567000056710000000000012701406223025017 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/storage/sqlalchemy/0000775000567000056710000000000012701406364025070 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/storage/sqlalchemy/__init__.py0000664000567000056710000000000012701406223027161 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/storage/sqlalchemy/test_models.py0000664000567000056710000000731412701406223027763 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
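# PreciseTimestamp works around MySQL's historical lack of sub-second
# DATETIME precision: on a MySQL dialect it maps to DECIMAL(20, 6) and
# stores values via utils.dt_to_decimal(), while on PostgreSQL it defers
# to the native DateTime type.  A minimal sketch of the round-trip these
# tests assert (decimal_to_dt is assumed to be the inverse helper in
# ceilometer.utils):
#
#     dec = utils.dt_to_decimal(datetime.datetime(2012, 7, 2, 10, 44))
#     assert utils.decimal_to_dt(dec) == datetime.datetime(2012, 7, 2, 10, 44)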
import datetime import mock from oslotest import base import sqlalchemy from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.types import NUMERIC from ceilometer.storage.sqlalchemy import models from ceilometer import utils class PreciseTimestampTest(base.BaseTestCase): @staticmethod def fake_dialect(name): def _type_descriptor_mock(desc): if type(desc) == DECIMAL: return NUMERIC(precision=desc.precision, scale=desc.scale) dialect = mock.MagicMock() dialect.name = name dialect.type_descriptor = _type_descriptor_mock return dialect def setUp(self): super(PreciseTimestampTest, self).setUp() self._mysql_dialect = self.fake_dialect('mysql') self._postgres_dialect = self.fake_dialect('postgres') self._type = models.PreciseTimestamp() self._date = datetime.datetime(2012, 7, 2, 10, 44) def test_load_dialect_impl_mysql(self): result = self._type.load_dialect_impl(self._mysql_dialect) self.assertEqual(NUMERIC, type(result)) self.assertEqual(20, result.precision) self.assertEqual(6, result.scale) self.assertTrue(result.asdecimal) def test_load_dialect_impl_postgres(self): result = self._type.load_dialect_impl(self._postgres_dialect) self.assertEqual(sqlalchemy.DateTime, type(result)) def test_process_bind_param_store_decimal_mysql(self): expected = utils.dt_to_decimal(self._date) result = self._type.process_bind_param(self._date, self._mysql_dialect) self.assertEqual(expected, result) def test_process_bind_param_store_datetime_postgres(self): result = self._type.process_bind_param(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_bind_param_store_none_mysql(self): result = self._type.process_bind_param(None, self._mysql_dialect) self.assertIsNone(result) def test_process_bind_param_store_none_postgres(self): result = self._type.process_bind_param(None, self._postgres_dialect) self.assertIsNone(result) def test_process_result_value_datetime_mysql(self): dec_value = utils.dt_to_decimal(self._date) result = self._type.process_result_value(dec_value, self._mysql_dialect) self.assertEqual(self._date, result) def test_process_result_value_datetime_postgres(self): result = self._type.process_result_value(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_result_value_none_mysql(self): result = self._type.process_result_value(None, self._mysql_dialect) self.assertIsNone(result) def test_process_result_value_none_postgres(self): result = self._type.process_result_value(None, self._postgres_dialect) self.assertIsNone(result) ceilometer-6.0.0/ceilometer/tests/unit/storage/test_models.py0000664000567000056710000000632112701406223025616 0ustar jenkinsjenkins00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
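# base.Model turns every constructor keyword into an instance attribute,
# and as_dict() serializes recursively, descending into nested Model
# instances and lists of them -- e.g., using the FakeModel defined below:
#
#     FakeModel(1, [FakeModel('a', 'b')]).as_dict()
#     # => {'arg1': 1, 'arg2': [{'arg1': 'a', 'arg2': 'b'}]}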
import datetime

from oslotest import base as testbase
import six

from ceilometer.event.storage import models as event_models
from ceilometer.storage import base
from ceilometer.storage import models


class FakeModel(base.Model):
    def __init__(self, arg1, arg2):
        base.Model.__init__(self, arg1=arg1, arg2=arg2)


class ModelTest(testbase.BaseTestCase):

    def test_create_attributes(self):
        m = FakeModel(1, 2)
        self.assertEqual(1, m.arg1)
        self.assertEqual(2, m.arg2)

    def test_as_dict(self):
        m = FakeModel(1, 2)
        d = m.as_dict()
        self.assertEqual({'arg1': 1, 'arg2': 2}, d)

    def test_as_dict_recursive(self):
        m = FakeModel(1, FakeModel('a', 'b'))
        d = m.as_dict()
        self.assertEqual({'arg1': 1,
                          'arg2': {'arg1': 'a', 'arg2': 'b'}},
                         d)

    def test_as_dict_recursive_list(self):
        m = FakeModel(1, [FakeModel('a', 'b')])
        d = m.as_dict()
        self.assertEqual({'arg1': 1,
                          'arg2': [{'arg1': 'a', 'arg2': 'b'}]},
                         d)

    def test_event_repr_no_traits(self):
        x = event_models.Event("1", "name", "now", None, {})
        # Event's repr renders the message id, event type, generation time
        # and the space-joined trait list (empty here).
        self.assertEqual("<Event: 1, name, now, >", repr(x))

    def test_get_field_names_of_sample(self):
        sample_fields = ["source", "counter_name", "counter_type",
                         "counter_unit", "counter_volume", "user_id",
                         "project_id", "resource_id", "timestamp",
                         "resource_metadata", "message_id",
                         "message_signature", "recorded_at"]

        self.assertEqual(set(sample_fields),
                         set(models.Sample.get_field_names()))


class TestTraitModel(testbase.BaseTestCase):

    def test_convert_value(self):
        v = event_models.Trait.convert_value(
            event_models.Trait.INT_TYPE, '10')
        self.assertEqual(10, v)
        self.assertIsInstance(v, int)
        v = event_models.Trait.convert_value(
            event_models.Trait.FLOAT_TYPE, '10')
        self.assertEqual(10.0, v)
        self.assertIsInstance(v, float)

        v = event_models.Trait.convert_value(
            event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456')
        self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v)
        self.assertIsInstance(v, datetime.datetime)

        v = event_models.Trait.convert_value(
            event_models.Trait.TEXT_TYPE, 10)
        self.assertEqual("10", v)
        self.assertIsInstance(v, six.text_type)
ceilometer-6.0.0/ceilometer/tests/unit/storage/test_base.py0000664000567000056710000000422712701406223025250 0ustar jenkinsjenkins00000000000000# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
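# iter_period(start, end, period) splits [start, end) into consecutive
# windows of `period` seconds and yields (window_start, window_end) pairs.
# Worked through for the tests below: a 12:00-13:00 range with a 60-second
# period yields 60 one-minute windows, so times[10] == (12:10, 12:11);
# with a 55-second period there are math.ceil(3600 / 55.0) == 66 windows,
# and times[10] starts 10 * 55s == 9m10s after the range start.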
import datetime import math from oslotest import base as testbase from ceilometer.storage import base class BaseTest(testbase.BaseTestCase): def test_iter_period(self): times = list(base.iter_period( datetime.datetime(2013, 1, 1, 12, 0), datetime.datetime(2013, 1, 1, 13, 0), 60)) self.assertEqual(60, len(times)) self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10), datetime.datetime(2013, 1, 1, 12, 11)), times[10]) self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21), datetime.datetime(2013, 1, 1, 12, 22)), times[21]) def test_iter_period_bis(self): times = list(base.iter_period( datetime.datetime(2013, 1, 2, 13, 0), datetime.datetime(2013, 1, 2, 14, 0), 55)) self.assertEqual(math.ceil(3600 / 55.0), len(times)) self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10), datetime.datetime(2013, 1, 2, 13, 10, 5)), times[10]) self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15), datetime.datetime(2013, 1, 2, 13, 20, 10)), times[21]) def test_handle_sort_key(self): sort_keys_meter = base._handle_sort_key('meter', 'foo') self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter) sort_keys_resource = base._handle_sort_key('resource', 'project_id') self.assertEqual(['project_id', 'user_id', 'timestamp'], sort_keys_resource) ceilometer-6.0.0/ceilometer/tests/unit/storage/test_get_connection.py0000664000567000056710000001167512701406223027341 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
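# storage.get_connection() picks a driver by URL scheme, looked up in the
# given entry-point namespace (illustrative calls; the hbase:// driver is
# only importable when the optional happybase dependency is present):
#
#     storage.get_connection('log://localhost',
#                            'ceilometer.metering.storage')  # impl_log
#     storage.get_connection('sqlite+pysqlite://',
#                            'ceilometer.metering.storage')  # impl_sqlalchemy
#
# An unrecognized scheme raises RuntimeError naming the missing engine.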
"""Tests for ceilometer/storage/ """ import unittest import mock from oslo_config import fixture as fixture_config from oslotest import base import retrying try: from ceilometer.event.storage import impl_hbase as impl_hbase_event except ImportError: impl_hbase_event = None from ceilometer import storage from ceilometer.storage import impl_log from ceilometer.storage import impl_sqlalchemy import six class EngineTest(base.BaseTestCase): def test_get_connection(self): engine = storage.get_connection('log://localhost', 'ceilometer.metering.storage') self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): try: storage.get_connection('no-such-engine://localhost', 'ceilometer.metering.storage') except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) class ConnectionRetryTest(base.BaseTestCase): def setUp(self): super(ConnectionRetryTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_retries(self): with mock.patch.object( retrying.Retrying, 'should_reject') as retry_reject: try: self.CONF.set_override("connection", "no-such-engine://", group="database") self.CONF.set_override("retry_interval", 0.00001, group="database") storage.get_connection_from_config(self.CONF) except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) self.assertEqual(10, retry_reject.call_count) class ConnectionConfigTest(base.BaseTestCase): def setUp(self): super(ConnectionConfigTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_only_default_url(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) def test_two_urls(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') def test_three_urls(self): self.CONF.set_override("connection", "log://", group="database") self.CONF.set_override("event_connection", "hbase://__test__", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'metering') self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'event') self.assertIsInstance(conn, impl_hbase_event.Connection) @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') def test_three_urls_no_default(self): self.CONF.set_override("connection", None, group="database") self.CONF.set_override("metering_connection", "log://", group="database") self.CONF.set_override("event_connection", "hbase://__test__", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) conn = storage.get_connection_from_config(self.CONF, 'event') self.assertIsInstance(conn, impl_hbase_event.Connection) def test_sqlalchemy_driver(self): self.CONF.set_override("connection", "sqlite+pysqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_sqlalchemy.Connection) conn = 
        self.assertIsInstance(conn, impl_sqlalchemy.Connection)
ceilometer-6.0.0/ceilometer/tests/unit/test_sample.py0000664000567000056710000000523612701406223024154 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/sample.py"""

import datetime

from ceilometer import sample
from ceilometer.tests import base


class TestSample(base.BaseTestCase):
    SAMPLE = sample.Sample(
        name='cpu',
        type=sample.TYPE_CUMULATIVE,
        unit='ns',
        volume='1234567',
        user_id='56c5692032f34041900342503fecab30',
        project_id='ac9494df2d9d4e709bac378cceabaf23',
        resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4',
        timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877),
        resource_metadata={}
    )

    def test_sample_string_format(self):
        # str(Sample) is expected to render the name, volume, resource_id
        # and timestamp in an angle-bracketed form, built here from the
        # SAMPLE fields above.
        expected = ('<name: cpu, volume: 1234567, resource_id:'
                    ' 1ca738a1-c49c-4401-8346-5c60ebdb03f4,'
                    ' timestamp: 2014-10-29 14:12:15.485877>')
        self.assertEqual(expected, str(self.SAMPLE))

    def test_sample_from_notifications_list(self):
        msg = {
            'event_type': u'sample.create',
            'timestamp': u'2015-06-1909: 19: 35.786893',
            'payload': [{u'counter_name': u'instance100'}],
            'priority': 'info',
            'publisher_id': u'ceilometer.api',
            'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
        }
        s = sample.Sample.from_notification(
            'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg)
        expected = {'event_type': msg['event_type'],
                    'host': msg['publisher_id']}
        self.assertEqual(expected, s.resource_metadata)

    def test_sample_from_notifications_dict(self):
        msg = {
            'event_type': u'sample.create',
            'timestamp': u'2015-06-1909: 19: 35.786893',
            'payload': {u'counter_name': u'instance100'},
            'priority': 'info',
            'publisher_id': u'ceilometer.api',
            'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
        }
        s = sample.Sample.from_notification(
            'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg)
        msg['payload']['event_type'] = msg['event_type']
        msg['payload']['host'] = msg['publisher_id']
        self.assertEqual(msg['payload'], s.resource_metadata)
ceilometer-6.0.0/ceilometer/tests/unit/api/0000775000567000056710000000000012701406364022033 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/api/__init__.py0000664000567000056710000000000012701406223024124 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/api/test_versions.py0000664000567000056710000000310112701406223025301 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
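# The constants below spell out, field by field, the exact JSON document
# the API root ('/') is expected to return: a single stable v2 entry with
# its JSON and XML media types and a describedby link to the docs.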
from ceilometer.tests.functional import api V2_MEDIA_TYPES = [ { 'base': 'application/json', 'type': 'application/vnd.openstack.telemetry-v2+json' }, { 'base': 'application/xml', 'type': 'application/vnd.openstack.telemetry-v2+xml' } ] V2_HTML_DESCRIPTION = { 'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', } V2_EXPECTED_RESPONSE = { 'id': 'v2', 'links': [ { 'rel': 'self', 'href': 'http://localhost/v2', }, V2_HTML_DESCRIPTION ], 'media-types': V2_MEDIA_TYPES, 'status': 'stable', 'updated': '2013-02-13T00:00:00Z', } V2_VERSION_RESPONSE = { "version": V2_EXPECTED_RESPONSE } VERSIONS_RESPONSE = { "versions": { "values": [ V2_EXPECTED_RESPONSE ] } } class TestVersions(api.FunctionalTest): def test_versions(self): data = self.get_json('/') self.assertEqual(VERSIONS_RESPONSE, data) ceilometer-6.0.0/ceilometer/tests/unit/api/test_hooks.py0000664000567000056710000000250112701406223024557 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as fixture_config import oslo_messaging from ceilometer.api import hooks from ceilometer.tests import base class TestTestNotifierHook(base.BaseTestCase): def setUp(self): super(TestTestNotifierHook, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf def test_init_notifier_with_drivers(self): self.CONF.set_override('telemetry_driver', 'messagingv2', group='publisher_notifier') hook = hooks.NotifierHook() notifier = hook.notifier self.assertIsInstance(notifier, oslo_messaging.Notifier) self.assertEqual(['messagingv2'], notifier._driver_names) ceilometer-6.0.0/ceilometer/tests/unit/api/test_app.py0000664000567000056710000000455312701406223024225 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
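# The matrix exercised in test_pecan_debug below reduces to one rule:
# Pecan's debug mode is passed through only when api.pecan_debug is
# explicitly True and a single API worker is configured; the global debug
# flag alone never turns it on, and workers > 1 force it off.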
import mock from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_log import log from ceilometer.api import app from ceilometer.tests import base class TestApp(base.BaseTestCase): def setUp(self): super(TestApp, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf log.register_options(cfg.CONF) def test_api_paste_file_not_exist(self): self.CONF.set_override('api_paste_config', 'non-existent-file') with mock.patch.object(self.CONF, 'find_file') as ff: ff.return_value = None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app) @mock.patch('ceilometer.storage.get_connection_from_config', mock.MagicMock()) @mock.patch('pecan.make_app') def test_pecan_debug(self, mocked): def _check_pecan_debug(g_debug, p_debug, expected, workers=1): self.CONF.set_override('debug', g_debug) if p_debug is not None: self.CONF.set_override('pecan_debug', p_debug, group='api') self.CONF.set_override('workers', workers, group='api') app.setup_app() args, kwargs = mocked.call_args self.assertEqual(expected, kwargs.get('debug')) _check_pecan_debug(g_debug=False, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=False, expected=False) _check_pecan_debug(g_debug=False, p_debug=True, expected=True) _check_pecan_debug(g_debug=True, p_debug=None, expected=False, workers=5) _check_pecan_debug(g_debug=False, p_debug=True, expected=False, workers=5) ceilometer-6.0.0/ceilometer/tests/unit/api/v2/0000775000567000056710000000000012701406364022362 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/api/v2/test_statistics.py0000664000567000056710000001022012701406223026152 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test statistics objects.""" import datetime from oslotest import base from ceilometer.api.controllers.v2 import meters class TestStatisticsDuration(base.BaseTestCase): def setUp(self): super(TestStatisticsDuration, self).setUp() # Create events relative to the range and pretend # that the intervening events exist. 
self.early1 = datetime.datetime(2012, 8, 27, 7, 0) self.early2 = datetime.datetime(2012, 8, 27, 17, 0) self.start = datetime.datetime(2012, 8, 28, 0, 0) self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) self.end = datetime.datetime(2012, 8, 28, 23, 59) self.late1 = datetime.datetime(2012, 8, 29, 9, 0) self.late2 = datetime.datetime(2012, 8, 29, 19, 0) def test_nulls(self): s = meters.Statistics(duration_start=None, duration_end=None, start_timestamp=None, end_timestamp=None) self.assertIsNone(s.duration_start) self.assertIsNone(s.duration_end) self.assertIsNone(s.duration) def test_overlap_range_start(self): s = meters.Statistics(duration_start=self.early1, duration_end=self.middle1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.start, s.duration_start) self.assertEqual(self.middle1, s.duration_end) self.assertEqual(8 * 60 * 60, s.duration) def test_within_range(self): s = meters.Statistics(duration_start=self.middle1, duration_end=self.middle2, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle1, s.duration_start) self.assertEqual(self.middle2, s.duration_end) self.assertEqual(10 * 60 * 60, s.duration) def test_within_range_zero_duration(self): s = meters.Statistics(duration_start=self.middle1, duration_end=self.middle1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle1, s.duration_start) self.assertEqual(self.middle1, s.duration_end) self.assertEqual(0, s.duration) def test_overlap_range_end(self): s = meters.Statistics(duration_start=self.middle2, duration_end=self.late1, start_timestamp=self.start, end_timestamp=self.end) self.assertEqual(self.middle2, s.duration_start) self.assertEqual(self.end, s.duration_end) self.assertEqual(((6 * 60) - 1) * 60, s.duration) def test_after_range(self): s = meters.Statistics(duration_start=self.late1, duration_end=self.late2, start_timestamp=self.start, end_timestamp=self.end) self.assertIsNone(s.duration_start) self.assertIsNone(s.duration_end) self.assertIsNone(s.duration) def test_without_timestamp(self): s = meters.Statistics(duration_start=self.late1, duration_end=self.late2, start_timestamp=None, end_timestamp=None) self.assertEqual(self.late1, s.duration_start) self.assertEqual(self.late2, s.duration_end) ceilometer-6.0.0/ceilometer/tests/unit/api/v2/__init__.py0000664000567000056710000000000012701406223024453 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/api/v2/test_query.py0000664000567000056710000004155512701406223025144 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
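# Query._get_value_as_type() casts a query value to the explicitly
# requested type when one is given; otherwise it infers the type from the
# literal itself: '123' -> 123, '123.456' -> 123.456, 'True' -> True, and
# anything that fails to parse as a literal (e.g. 'WWW-Layer-4a80714f' or
# 'Ref::StackId') is kept as a plain string.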
"""Test the methods related to query.""" import datetime import fixtures import mock from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import wsme from ceilometer.api.controllers.v2 import base as v2_base from ceilometer.api.controllers.v2 import events from ceilometer.api.controllers.v2 import meters from ceilometer.api.controllers.v2 import utils from ceilometer import storage from ceilometer.storage import base as storage_base from ceilometer.tests import base as tests_base class TestQuery(base.BaseTestCase): def setUp(self): super(TestQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.useFixture(mockpatch.Patch('ceilometer.api.controllers.v2.events' '._build_rbac_query_filters', return_value={'t_filter': [], 'admin_proj': None})) def test_get_value_as_type_with_integer(self): query = v2_base.Query(field='metadata.size', op='eq', value='123', type='integer') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='float') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True', type='boolean') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux', type='string') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_datetime(self): query = v2_base.Query(field='metadata.date', op='eq', value='2014-01-01T05:00:00', type='datetime') self.assertIsInstance(query._get_value_as_type(), datetime.datetime) self.assertIsNone(query._get_value_as_type().tzinfo) def test_get_value_as_type_with_integer_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean_without_type(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string_without_type(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_bad_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='blob') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_with_bad_value(self): query = v2_base.Query(field='metadata.size', op='eq', value='fake', type='integer') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_integer_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='WWW-Layer-4a80714f') expected = 'WWW-Layer-4a80714f' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_boolean_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='True or False') expected 
= 'True or False' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error(self): # bug 1221736 value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' query = v2_base.Query(field='group_id', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error_colons(self): # bug 1221736 value = 'Ref::StackId' query = v2_base.Query(field='field_name', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_event_query_to_event_filter_with_bad_op(self): # bug 1511592 query = v2_base.Query(field='event_type', op='ne', value='compute.instance.create.end', type='string') self.assertRaises(v2_base.ClientSideError, events._event_query_to_event_filter, [query]) class TestValidateGroupByFields(base.BaseTestCase): def test_valid_field(self): result = meters._validate_groupby_fields(['user_id']) self.assertEqual(['user_id'], result) def test_valid_fields_multiple(self): result = set(meters._validate_groupby_fields( ['user_id', 'project_id', 'source'])) self.assertEqual(set(['user_id', 'project_id', 'source']), result) def test_invalid_field(self): self.assertRaises(wsme.exc.UnknownArgument, meters._validate_groupby_fields, ['wtf']) def test_invalid_field_multiple(self): self.assertRaises(wsme.exc.UnknownArgument, meters._validate_groupby_fields, ['user_id', 'wtf', 'project_id', 'source']) def test_duplicate_fields(self): result = set( meters._validate_groupby_fields(['user_id', 'source', 'user_id']) ) self.assertEqual(set(['user_id', 'source']), result) class TestQueryToKwArgs(tests_base.BaseTestCase): def setUp(self): super(TestQueryToKwArgs, self).setUp() self.useFixture(mockpatch.PatchObject( utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) self.useFixture(mockpatch.PatchObject( utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) def test_sample_filter_single(self): q = [v2_base.Query(field='user_id', op='eq', value='uid')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertIn('user', kwargs) self.assertEqual(1, len(kwargs)) self.assertEqual('uid', kwargs['user']) def test_sample_filter_multi(self): q = [v2_base.Query(field='user_id', op='eq', value='uid'), v2_base.Query(field='project_id', op='eq', value='pid'), v2_base.Query(field='resource_id', op='eq', value='rid'), v2_base.Query(field='source', op='eq', value='source_name'), v2_base.Query(field='meter', op='eq', value='meter_name')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(5, len(kwargs)) self.assertEqual('uid', kwargs['user']) self.assertEqual('pid', kwargs['project']) self.assertEqual('rid', kwargs['resource']) self.assertEqual('source_name', kwargs['source']) self.assertEqual('meter_name', kwargs['meter']) def test_sample_filter_timestamp(self): ts_start = timeutils.utcnow() ts_end = ts_start + datetime.timedelta(minutes=5) q = [v2_base.Query(field='timestamp', op='lt', value=str(ts_end)), v2_base.Query(field='timestamp', op='gt', value=str(ts_start))] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(4, len(kwargs)) self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) self.assertEqual('gt', kwargs['start_timestamp_op']) self.assertEqual('lt', kwargs['end_timestamp_op']) def test_sample_filter_meta(self): q = [v2_base.Query(field='metadata.size', op='eq', value='20'), 
v2_base.Query(field='resource_metadata.id', op='eq', value='meta_id')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(1, len(kwargs)) self.assertEqual(2, len(kwargs['metaquery'])) self.assertEqual(20, kwargs['metaquery']['metadata.size']) self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) def test_sample_filter_non_equality_on_metadata(self): queries = [v2_base.Query(field='resource_metadata.image_id', op='gt', value='image', type='string'), v2_base.Query(field='metadata.ramdisk_id', op='le', value='ramdisk', type='string')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, queries, storage.SampleFilter.__init__) def test_sample_filter_invalid_field(self): q = [v2_base.Query(field='invalid', op='eq', value='20')] self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_invalid_op(self): q = [v2_base.Query(field='user_id', op='lt', value='20')] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_timestamp_invalid_op(self): ts_start = timeutils.utcnow() q = [v2_base.Query(field='timestamp', op='eq', value=str(ts_start))] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_exclude_internal(self): queries = [v2_base.Query(field=f, op='eq', value='fake', type='string') for f in ['y', 'on_behalf_of', 'x']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises(wsme.exc.ClientSideError, utils.query_to_kwargs, queries, storage.SampleFilter.__init__, internal_keys=['on_behalf_of']) def test_sample_filter_self_always_excluded(self): queries = [v2_base.Query(field='user_id', op='eq', value='20')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) self.assertNotIn('self', kwargs) def test_sample_filter_translation(self): queries = [v2_base.Query(field=f, op='eq', value='fake_%s' % f, type='string') for f in ['user_id', 'project_id', 'resource_id']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) for o in ['user', 'project', 'resource']: self.assertEqual('fake_%s_id' % o, kwargs.get(o)) def test_timestamp_validation(self): q = [v2_base.Query(field='timestamp', op='le', value='123')] exc = self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) expected_exc = wsme.exc.InvalidInput('timestamp', '123', 'invalid timestamp format') self.assertEqual(str(expected_exc), str(exc)) def test_sample_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) valid_keys = ['message_id', 'meter', 'project', 'resource', 'search_offset', 'source', 'timestamp', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) def test_get_meters_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( 
            wsme.exc.UnknownArgument,
            utils.query_to_kwargs, q,
            storage_base.Connection.get_meters, ['limit', 'unique'])
        valid_keys = ['project', 'resource', 'source', 'user']
        msg = ("unrecognized field in query: %s, "
               "valid keys: %s") % (q, valid_keys)
        expected_exc = wsme.exc.UnknownArgument('abc', msg)
        self.assertEqual(str(expected_exc), str(exc))

    def test_get_resources_filter_valid_fields(self):
        q = [v2_base.Query(field='abc', op='eq', value='abc')]
        exc = self.assertRaises(
            wsme.exc.UnknownArgument,
            utils.query_to_kwargs, q,
            storage_base.Connection.get_resources, ['limit'])
        valid_keys = ['project', 'resource', 'search_offset', 'source',
                      'timestamp', 'user']
        msg = ("unrecognized field in query: %s, "
               "valid keys: %s") % (q, valid_keys)
        expected_exc = wsme.exc.UnknownArgument('abc', msg)
        self.assertEqual(str(expected_exc), str(exc))
ceilometer-6.0.0/ceilometer/tests/unit/api/v2/test_wsme_custom_type.py0000664000567000056710000000214412701406223027374 0ustar jenkinsjenkins00000000000000#
#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base
import wsme

from ceilometer.api.controllers.v2 import base as v2_base


class TestWsmeCustomType(base.BaseTestCase):

    def test_advenum_default(self):
        class dummybase(wsme.types.Base):
            ae = v2_base.AdvEnum("name", str, "one", "other",
                                 default="other")

        obj = dummybase()
        self.assertEqual("other", obj.ae)

        obj = dummybase(ae="one")
        self.assertEqual("one", obj.ae)

        self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists")
ceilometer-6.0.0/ceilometer/tests/unit/api/v2/test_complex_query.py0000664000567000056710000003573312701406223026674 0ustar jenkinsjenkins00000000000000#
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Authors: Ildiko Vancsa
#          Balazs Gibizer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
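# The ValidatedComplexQuery tests that follow validate the JSON filter
# documents accepted by the v2 query API, for example:
#     {"and": [{"=": {"project_id": 42}},
#              {">=": {"counter_volume": 36}}]}
# Operator names are lower-cased and ISO8601 timestamp strings are replaced
# with naive datetimes before the expression is checked against the
# jsonschema, as the test cases below demonstrate.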
"""Test the methods related to complex query.""" import datetime import fixtures import jsonschema import mock from oslotest import base import wsme from ceilometer.api.controllers.v2 import query from ceilometer.storage import models class FakeComplexQuery(query.ValidatedComplexQuery): def __init__(self, db_model, additional_name_mapping=None, metadata=False): super(FakeComplexQuery, self).__init__(query=None, db_model=db_model, additional_name_mapping=( additional_name_mapping or {}), metadata_allowed=metadata) sample_name_mapping = {"resource": "resource_id", "meter": "counter_name", "type": "counter_type", "unit": "counter_unit", "volume": "counter_volume"} class TestComplexQuery(base.BaseTestCase): def setUp(self): super(TestComplexQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.query = FakeComplexQuery(models.Sample, sample_name_mapping, True) def test_replace_isotime_utc(self): filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_timezone_removed(self): filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_wrong_syntax(self): filter_expr = {"=": {"timestamp": "not a valid isotime string"}} self.assertRaises(wsme.exc.ClientSideError, self.query._replace_isotime_with_datetime, filter_expr) def test_replace_isotime_in_complex_filter(self): filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["and"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["and"][1]["="]["timestamp"]) def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): subfilter = {"and": [{"=": {"project_id": 42}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, subfilter]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["or"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["or"][1]["and"][1]["="]["timestamp"]) def test_convert_operator_to_lower_case(self): filter_expr = {"AND": [{"=": {"project_id": 42}}, {"=": {"project_id": 44}}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("and", list(filter_expr.keys())[0]) filter_expr = {"Or": [{"=": {"project_id": 43}}, {"anD": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("or", list(filter_expr.keys())[0]) self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) def test_invalid_filter_misstyped_field_name_samples(self): filter = {"=": {"project_id11": 42}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_invalid_complex_filter_wrong_field_names(self): filter = {"and": [{"=": {"non_existing_field": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"or": [{"=": {"non_existing_field": 42}}, {"and": [{"=": 
{"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_convert_orderby(self): orderby = [] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([], orderby) orderby = [{"project_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "desc"}], orderby) orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], orderby) def test_validate_orderby_empty_direction(self): orderby = [{"project_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"resource_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_order_string(self): orderby = [{"project_id": "not a valid order"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_multiple_item_order_string(self): orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_empty_field_name(self): orderby = [{"": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"": "desc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name(self): orderby = [{"project_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_metadata_is_not_allowed(self): orderby = [{"metadata.display_name": "asc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) class TestFilterSyntaxValidation(base.BaseTestCase): def setUp(self): super(TestFilterSyntaxValidation, self).setUp() self.query = FakeComplexQuery(models.Sample, sample_name_mapping, True) def test_simple_operator(self): filter = {"=": {"project_id": "string_value"}} self.query._validate_filter(filter) filter = {"=>": {"project_id": "string_value"}} self.query._validate_filter(filter) def test_valid_value_types(self): filter = {"=": {"project_id": "string_value"}} self.query._validate_filter(filter) filter = {"=": {"project_id": 42}} self.query._validate_filter(filter) filter = {"=": {"project_id": 3.14}} self.query._validate_filter(filter) filter = {"=": {"project_id": True}} self.query._validate_filter(filter) filter = {"=": {"project_id": False}} self.query._validate_filter(filter) def test_invalid_simple_operator(self): filter = {"==": {"project_id": "string_value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"": {"project_id": "string_value"}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_more_than_one_operator_is_invalid(self): filter = {"=": {"project_id": "string_value"}, "<": {"": ""}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_empty_expression_is_invalid(self): filter = {} 
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_invalid_field_name(self):
        filter = {"=": {"": "value"}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

        filter = {"=": {" ": "value"}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

        filter = {"=": {"\t": "value"}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_more_than_one_field_is_invalid(self):
        filter = {"=": {"project_id": "value", "resource_id": "value"}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_missing_field_after_simple_op_is_invalid(self):
        filter = {"=": {}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_and_or(self):
        filter = {"and": [{"=": {"project_id": "string_value"}},
                          {"=": {"resource_id": "value"}}]}
        self.query._validate_filter(filter)

        filter = {"or": [{"and": [{"=": {"project_id": "string_value"}},
                                  {"=": {"resource_id": "value"}}]},
                         {"=": {"counter_name": "value"}}]}
        self.query._validate_filter(filter)

        filter = {"or": [{"and": [{"=": {"project_id": "string_value"}},
                                  {"=": {"resource_id": "value"}},
                                  {"<": {"counter_name": 42}}]},
                         {"=": {"counter_name": "value"}}]}
        self.query._validate_filter(filter)

    def test_complex_operator_with_in(self):
        filter = {"and": [{"<": {"counter_volume": 42}},
                          {">=": {"counter_volume": 36}},
                          {"in": {"project_id": ["project_id1",
                                                 "project_id2",
                                                 "project_id3"]}}]}
        self.query._validate_filter(filter)

    def test_invalid_complex_operator(self):
        filter = {"xor": [{"=": {"project_id": "string_value"}},
                          {"=": {"resource_id": "value"}}]}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_and_or_with_one_child_is_invalid(self):
        filter = {"or": [{"=": {"project_id": "string_value"}}]}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_complex_operator_with_zero_child_is_invalid(self):
        filter = {"or": []}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_more_than_one_complex_operator_is_invalid(self):
        filter = {"and": [{"=": {"project_id": "string_value"}},
                          {"=": {"resource_id": "value"}}],
                  "or": [{"=": {"project_id": "string_value"}},
                         {"=": {"resource_id": "value"}}]}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_not(self):
        filter = {"not": {"=": {"project_id": "value"}}}
        self.query._validate_filter(filter)

        filter = {
            "not": {"or": [{"and": [{"=": {"project_id": "string_value"}},
                                    {"=": {"resource_id": "value"}},
                                    {"<": {"counter_name": 42}}]},
                           {"=": {"counter_name": "value"}}]}}
        self.query._validate_filter(filter)

    def test_not_with_zero_child_is_invalid(self):
        filter = {"not": {}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_not_with_more_than_one_child_is_invalid(self):
        filter = {"not": {"=": {"project_id": "value"},
                          "!=": {"resource_id": "value"}}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)

    def test_empty_in_query_not_passing(self):
        filter = {"in": {"resource_id": []}}
        self.assertRaises(jsonschema.ValidationError,
                          self.query._validate_filter,
                          filter)
ceilometer-6.0.0/ceilometer/tests/unit/test_novaclient.py0000664000567000056710000002263212701406223025034 0ustar jenkinsjenkins00000000000000# Copyright 2013-2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import novaclient from oslo_config import fixture as fixture_config from oslotest import base from oslotest import mockpatch from ceilometer import nova_client class TestNovaClient(base.BaseTestCase): def setUp(self): super(TestNovaClient, self).setUp() self._flavors_count = 0 self._images_count = 0 self.nv = nova_client.Client() self.useFixture(mockpatch.PatchObject( self.nv.nova_client.flavors, 'get', side_effect=self.fake_flavors_get)) self.useFixture(mockpatch.PatchObject( self.nv.nova_client.images, 'get', side_effect=self.fake_images_get)) self.CONF = self.useFixture(fixture_config.Config()).conf def fake_flavors_get(self, *args, **kwargs): self._flavors_count += 1 a = mock.MagicMock() a.id = args[0] if a.id == 1: a.name = 'm1.tiny' elif a.id == 2: a.name = 'm1.large' else: raise novaclient.exceptions.NotFound('foobar') return a def fake_images_get(self, *args, **kwargs): self._images_count += 1 a = mock.MagicMock() a.id = args[0] image_details = { 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), 3: ('rhel-6-x64', None), 4: ('rhel-6-x64', dict()), 5: ('rhel-6-x64', dict(kernel_id=11)), 6: ('rhel-6-x64', dict(ramdisk_id=21)) } if a.id in image_details: a.name = image_details[a.id][0] a.metadata = image_details[a.id][1] else: raise novaclient.exceptions.NotFound('foobar') return a @staticmethod def fake_servers_list(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 1} b = mock.MagicMock() b.id = 43 b.flavor = {'id': 2} b.image = {'id': 2} return [a, b] def test_instance_get_all_by_host(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(2, len(instances)) self.assertEqual('m1.tiny', instances[0].flavor['name']) self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) self.assertEqual(11, instances[0].kernel_id) self.assertEqual(21, instances[0].ramdisk_id) def test_instance_get_all(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all() self.assertEqual(2, len(instances)) self.assertEqual(42, instances[0].id) self.assertEqual(1, instances[0].flavor['id']) self.assertEqual(1, instances[0].image['id']) @staticmethod def fake_servers_list_unknown_flavor(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = {'id': 1} return [a] def test_instance_get_all_by_host_unknown_flavor(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_flavor): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].flavor['name']) @staticmethod def fake_servers_list_unknown_image(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 666} return [a] @staticmethod def fake_servers_list_image_missing_metadata(*args, **kwargs): a = 
mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': args[0]} return [a] @staticmethod def fake_instance_image_missing(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = None return [a] def test_instance_get_all_by_host_unknown_image(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_image): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].image['name']) def test_with_flavor_and_image(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) instance = results[0] self.assertEqual(2, len(results)) self.assertEqual('ubuntu-12.04-x86', instance.image['name']) self.assertEqual('m1.tiny', instance.flavor['name']) self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_unknown_image(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_unknown_flavor(self): instances = self.fake_servers_list_unknown_flavor() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.flavor['name']) self.assertEqual(0, instance.flavor['vcpus']) self.assertEqual(0, instance.flavor['ram']) self.assertEqual(0, instance.flavor['disk']) self.assertNotEqual(instance.image['name'], 'unknown-id-666') self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_none_metadata(self): instances = self.fake_servers_list_image_missing_metadata(3) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_metadata(self): instances = self.fake_servers_list_image_missing_metadata(4) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_ramdisk(self): instances = self.fake_servers_list_image_missing_metadata(5) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual(11, instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_kernel(self): instances = self.fake_servers_list_image_missing_metadata(6) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_no_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) self.assertEqual(2, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) self.assertEqual(4, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_unknown_image_cache(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances * 2) self.assertEqual(2, len(results)) self.assertEqual(1, 
self._flavors_count) self.assertEqual(1, self._images_count) for instance in results: self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_missing_image_instance(self): instances = self.fake_instance_image_missing() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.image) self.assertIsNone(instance.ramdisk_id) def test_with_nova_http_log_debug(self): self.CONF.set_override("nova_http_log_debug", True) self.nv = nova_client.Client() self.assertIsNotNone(self.nv.nova_client.client.logger) ceilometer-6.0.0/ceilometer/tests/unit/test_event_pipeline.py0000664000567000056710000004211412701406224025676 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import traceback import uuid import mock from oslo_config import fixture as fixture_config import oslo_messaging from oslotest import base from oslotest import mockpatch from ceilometer.event.storage import models from ceilometer import pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer.publisher import utils class EventPipelineTestCase(base.BaseTestCase): def get_publisher(self, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](url) class PublisherClassException(publisher.PublisherBase): def publish_samples(self, ctxt, samples): pass def publish_events(self, ctxt, events): raise Exception() def setUp(self): super(EventPipelineTestCase, self).setUp() self.p_type = pipeline.EVENT_TYPE self.transformer_manager = None self.test_event = models.Event( message_id=uuid.uuid4(), event_type='a', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'started'} ) self.test_event2 = models.Event( message_id=uuid.uuid4(), event_type='b', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'stopped'} ) self.useFixture(mockpatch.PatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(mockpatch.Patch( 'ceilometer.pipeline.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, msg): if self._reraise_exception: raise Exception(traceback.format_exc()) def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" source = {'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink']} 
sink = {'name': 'test_sink', 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new://'], }) def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def _exception_create_pipelinemanager(self): self.assertRaises(pipeline.PipelineException, pipeline.PipelineManager, self.pipeline_cfg, self.transformer_manager, self.p_type) def test_no_events(self): self._unset_pipeline_cfg('events') self._exception_create_pipelinemanager() def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_name(self): pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) for pipe in pipeline_manager.pipelines: self.assertTrue(pipe.name.startswith('event:')) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_events_include_exclude_same(self): event_cfg = ['a', '!a'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_include_exclude(self): event_cfg = ['a', '!b'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_wildcard_included(self): event_cfg = ['a', '*'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_multiple_included_events(self): event_cfg = ['a', 'b'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) with pipeline_manager.publisher(None) as p: p([self.test_event2]) self.assertEqual(2, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) self.assertEqual('b', getattr(publisher.events[1], 'event_type')) def test_event_non_match(self): event_cfg = ['nomatch'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, 
self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.events)) self.assertEqual(0, publisher.calls) def test_wildcard_event(self): event_cfg = ['*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_wildcard_excluded_events(self): event_cfg = ['*', '!a'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) def test_wildcard_excluded_events_not_excluded(self): event_cfg = ['*', '!b'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_not_excluded(self): event_cfg = ['!b', '!c'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_excluded(self): event_cfg = ['!a', '!c'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) self.assertTrue(pipeline_manager.pipelines[0].support_event('b')) self.assertFalse(pipeline_manager.pipelines[0].support_event('c')) def test_wildcard_and_excluded_wildcard_events(self): event_cfg = ['*', '!compute.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertTrue(pipeline_manager.pipelines[0]. support_event('identity.user.create')) def test_included_event_and_wildcard_events(self): event_cfg = ['compute.instance.create.start', 'identity.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertTrue(pipeline_manager.pipelines[0]. support_event('identity.user.create')) self.assertTrue(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.stop')) def test_excluded_event_and_excluded_wildcard_events(self): event_cfg = ['!compute.instance.create.start', '!identity.*'] self._set_pipeline_cfg('events', event_cfg) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) self.assertFalse(pipeline_manager.pipelines[0]. 
support_event('identity.user.create')) self.assertFalse(pipeline_manager.pipelines[0]. support_event('compute.instance.create.start')) self.assertTrue(pipeline_manager.pipelines[0]. support_event('compute.instance.create.stop')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event, self.test_event2]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.events)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, len(new_publisher.events)) self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) with pipeline_manager.publisher(None) as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() def test_event_pipeline_endpoint_requeue_on_failure(self): self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([]) self.CONF.set_override("ack_on_event_error", False, group="notification") self.CONF.set_override("telemetry_secret", "not-so-secret", group="publisher") test_data = { 'message_id': uuid.uuid4(), 'event_type': 'a', 'generated': '2013-08-08 21:06:37.803826', 'traits': [ {'name': 't_text', 'value': 1, 'dtype': 'text_trait' } ], 'raw': {'status': 'started'} } message_sign = utils.compute_signature(test_data, 'not-so-secret') test_data['message_signature'] = message_sign fake_publisher = mock.Mock() self.useFixture(mockpatch.Patch( 'ceilometer.publisher.test.TestPublisher', return_value=fake_publisher)) pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager, self.p_type) event_pipeline_endpoint = pipeline.EventPipelineEndpoint( mock.Mock(), pipeline_manager.pipelines[0]) fake_publisher.publish_events.side_effect = Exception ret = event_pipeline_endpoint.sample([ {'ctxt': {}, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'a', 'payload': [test_data], 'metadata': {}}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) ceilometer-6.0.0/ceilometer/tests/unit/energy/0000775000567000056710000000000012701406364022553 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/energy/__init__.py0000664000567000056710000000000012701406223024644 0ustar 
jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/unit/energy/test_kwapi.py0000664000567000056710000001066712701406223025303 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from keystoneauth1 import exceptions import mock from oslo_context import context from oslotest import base from oslotest import mockpatch import six from ceilometer.agent import manager from ceilometer.energy import kwapi PROBE_DICT = { "probes": { "A": { "timestamp": 1357730232.68754, "w": 107.3, "kwh": 0.001058255421506034 }, "B": { "timestamp": 1357730232.048158, "w": 15.0, "kwh": 0.029019045026169896 }, "C": { "timestamp": 1357730232.223375, "w": 95.0, "kwh": 0.17361822634312918 } } } ENDPOINT = 'end://point' class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() class _BaseTestCase(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestCase, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() class TestKwapi(_BaseTestCase): @staticmethod def fake_get_kwapi_client(ksclient, endpoint): raise exceptions.EndpointNotFound("fake keystone exception") def test_endpoint_not_exist(self): with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', side_effect=self.fake_get_kwapi_client): pollster = kwapi.EnergyPollster() samples = list(pollster.get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(0, len(samples)) class TestEnergyPollster(_BaseTestCase): pollster_cls = kwapi.EnergyPollster unit = 'kwh' def setUp(self): super(TestEnergyPollster, self).setUp() self.useFixture(mockpatch.PatchObject( kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) @staticmethod def fake_iter_probes(ksclient, cache, endpoint): probes = PROBE_DICT['probes'] for key, value in six.iteritems(probes): probe_dict = value probe_dict['id'] = key yield probe_dict def test_default_discovery(self): pollster = kwapi.EnergyPollster() self.assertEqual('endpoint:energy', pollster.default_discovery) def test_sample(self): cache = {} samples = list(self.pollster_cls().get_samples(self.manager, cache, [ENDPOINT])) self.assertEqual(len(PROBE_DICT['probes']), len(samples)) samples_by_name = dict((s.resource_id, s) for s in samples) for name, probe in PROBE_DICT['probes'].items(): sample = samples_by_name[name] expected = datetime.datetime.fromtimestamp( probe['timestamp'] ).isoformat() self.assertEqual(expected, sample.timestamp) self.assertEqual(probe[self.unit], sample.volume) class TestPowerPollster(TestEnergyPollster): pollster_cls = kwapi.PowerPollster unit = 'w' class TestEnergyPollsterCache(_BaseTestCase): pollster_cls = kwapi.EnergyPollster def test_get_samples_cached(self): probe = {'id': 'A'} probe.update(PROBE_DICT['probes']['A']) cache = { '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], } self.manager._keystone = mock.Mock() pollster = self.pollster_cls() with 
mock.patch.object(pollster, '_get_probes') as do_not_call: do_not_call.side_effect = AssertionError('should not be called') samples = list(pollster.get_samples(self.manager, cache, [ENDPOINT])) self.assertEqual(1, len(samples)) class TestPowerPollsterCache(TestEnergyPollsterCache): pollster_cls = kwapi.PowerPollster ceilometer-6.0.0/ceilometer/tests/integration/0000775000567000056710000000000012701406364022626 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/__init__.py0000664000567000056710000000000012701406223024717 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/gabbi/0000775000567000056710000000000012701406364023672 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/gabbi/gabbits-live/0000775000567000056710000000000012701406364026242 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json0000664000567000056710000000536712701406223031572 0ustar jenkinsjenkins00000000000000{ "stack_name": "integration_test", "template": { "heat_template_version": "2013-05-23", "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", "resources": { "asg": { "type": "OS::Heat::AutoScalingGroup", "properties": { "min_size": 1, "max_size": 2, "resource": { "type": "OS::Nova::Server", "properties": { "networks": [{ "network": "private" }], "flavor": "m1.tiny", "image": "$ENVIRON['GLANCE_IMAGE_NAME']", "metadata": { "metering.server_group": { "get_param": "OS::stack_id" } }, "user_data_format": "RAW", "user_data": {"Fn::Join": ["", [ "#!/bin/sh\n", "echo 'Loading CPU'\n", "set -v\n", "cat /dev/urandom > /dev/null\n" ]]} } } } }, "web_server_scaleup_policy": { "type": "OS::Heat::ScalingPolicy", "properties": { "adjustment_type": "change_in_capacity", "auto_scaling_group_id": { "get_resource": "asg" }, "cooldown": 2, "scaling_adjustment": 1 } }, "cpu_alarm_high": { "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", "properties": { "description": "Scale-up if the mean CPU > 10% on 1 minute", "metric": "cpu_util", "aggregation_method": "mean", "granularity": 60, "evaluation_periods": 1, "threshold": 10, "comparison_operator": "gt", "alarm_actions": [ { "get_attr": [ "web_server_scaleup_policy", "alarm_url" ] } ], "resource_type": "instance", "query": { "str_replace": { "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", "params": { "stack_id": { "get_param": "OS::stack_id" } } } } } } } } } ceilometer-6.0.0/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml0000664000567000056710000001173712701406223031442 0ustar jenkinsjenkins00000000000000defaults: request_headers: x-auth-token: $ENVIRON['ADMIN_TOKEN'] tests: - name: list alarms none desc: Lists alarms, none yet exist url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET response_strings: - "[]" - name: list servers none desc: List servers, none yet exists url: $ENVIRON['NOVA_SERVICE_URL']/servers method: GET response_strings: - "[]" - name: create stack desc: Create an autoscaling stack url: $ENVIRON['HEAT_SERVICE_URL']/stacks method: POST request_headers: content-type: application/json data: <@create_stack.json status: 201 - name: waiting for stack creation desc: Wait for the second event on the stack resource, it can be a success or failure url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test redirects: true method: GET status: 200 poll: count: 300 delay: 1 
response_json_paths: $.events[1].resource_name: integration_test - name: control stack status desc: Checks the stack have been created successfully url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET status: 200 poll: count: 5 delay: 1 response_json_paths: $.stack.stack_status: "CREATE_COMPLETE" - name: list servers desc: Wait the autoscaling stack grow to two servers url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail method: GET poll: count: 600 delay: 1 response_json_paths: $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[0].status: ACTIVE $.servers[1].status: ACTIVE $.servers.`len`: 2 - name: check gnocchi resources desc: Check the gnocchi resources for this two servers exists url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance method: GET poll: count: 30 delay: 1 response_strings: - '"id": "$RESPONSE["$.servers[0].id"]"' - '"id": "$RESPONSE["$.servers[1].id"]"' - name: check alarm desc: Check the aodh alarm and its state url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET poll: count: 30 delay: 1 response_strings: - "integration_test-cpu_alarm_high-" response_json_paths: $[0].state: alarm - name: get stack location for update desc: Get the stack location url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test method: GET status: 302 - name: update stack desc: Update an autoscaling stack url: $LOCATION method: PUT request_headers: content-type: application/json data: <@update_stack.json status: 202 - name: waiting for stack update desc: Wait for the third event on the stack resource, it can be a success or failure url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test redirects: true method: GET status: 200 poll: count: 300 delay: 1 response_json_paths: $.events[3].resource_name: integration_test - name: control stack status desc: Checks the stack have been created successfully url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET status: 200 poll: count: 5 delay: 1 response_json_paths: $.stack.stack_status: "UPDATE_COMPLETE" - name: list servers desc: Wait the autoscaling stack shrink to one server url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail method: GET poll: count: 600 delay: 1 response_json_paths: $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id'] $.servers[0].status: ACTIVE $.servers.`len`: 1 - name: get stack location desc: Get the stack location url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test method: GET status: 302 - name: delete stack desc: Delete the stack url: $LOCATION method: DELETE status: 204 - name: get deleted stack desc: Check the stack have been deleted url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test redirects: true method: GET poll: count: 240 delay: 1 status: 404 - name: list alarms deleted desc: List alarms, no more exist url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms method: GET response_strings: - "[]" - name: list servers deleted desc: List servers, no more exists url: $ENVIRON['NOVA_SERVICE_URL']/servers method: GET response_strings: - "[]" ceilometer-6.0.0/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json0000664000567000056710000000533012701406223031577 0ustar jenkinsjenkins00000000000000{ "template": { "heat_template_version": "2013-05-23", "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh", "resources": { "asg": { "type": "OS::Heat::AutoScalingGroup", 
"properties": { "min_size": 1, "max_size": 2, "resource": { "type": "OS::Nova::Server", "properties": { "networks": [{ "network": "private" }], "flavor": "m1.tiny", "image": "$ENVIRON['GLANCE_IMAGE_NAME']", "metadata": { "metering.server_group": { "get_param": "OS::stack_id" } }, "user_data_format": "RAW", "user_data": {"Fn::Join": ["", [ "#!/bin/sh\n", "echo 'Loading CPU'\n", "set -v\n", "cat /dev/urandom > /dev/null\n" ]]} } } } }, "web_server_scaledown_policy": { "type": "OS::Heat::ScalingPolicy", "properties": { "adjustment_type": "change_in_capacity", "auto_scaling_group_id": { "get_resource": "asg" }, "cooldown": 2, "scaling_adjustment": -1 } }, "cpu_alarm_high": { "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", "properties": { "description": "Scale-down if the mean CPU > 10% on 1 minute", "metric": "cpu_util", "aggregation_method": "mean", "granularity": 60, "evaluation_periods": 1, "threshold": 10, "comparison_operator": "gt", "alarm_actions": [ { "get_attr": [ "web_server_scaledown_policy", "alarm_url" ] } ], "resource_type": "instance", "query": { "str_replace": { "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", "params": { "stack_id": { "get_param": "OS::stack_id" } } } } } } } } } ceilometer-6.0.0/ceilometer/tests/integration/gabbi/__init__.py0000664000567000056710000000000012701406223025763 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/gabbi/test_gabbi_live.py0000664000567000056710000000254312701406223027364 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A test module to exercise the Gnocchi API with gabbi.""" import os from gabbi import driver TESTS_DIR = 'gabbits-live' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL", "HEAT_SERVICE_URL", "NOVA_SERVICE_URL", "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"] for env_variable in NEEDED_ENV: if not os.getenv(env_variable): if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): raise RuntimeError('%s is not set' % env_variable) else: return test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host="localhost", port=8041) ceilometer-6.0.0/ceilometer/tests/integration/hooks/0000775000567000056710000000000012701406364023751 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/integration/hooks/post_test_hook.sh0000775000567000056710000000673012701406224027355 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. function generate_testr_results { if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz fi } # If we're running in the gate find our keystone endpoint to give to # gabbi tests and do a chown. Otherwise the existing environment # should provide URL and TOKEN. if [ -d $BASE/new/devstack ]; then export CEILOMETER_DIR="$BASE/new/ceilometer" STACK_USER=stack sudo chown -R $STACK_USER:stack $CEILOMETER_DIR source $BASE/new/devstack/openrc admin admin # Go to the ceilometer dir cd $CEILOMETER_DIR fi openstack catalog list export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/public/{print $2}') export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/public/{print $2}') export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/public/{print $2}') export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.*uec /{print $4}') export ADMIN_TOKEN=$(openstack token issue -c id -f value) # Run tests echo "Running telemetry integration test suite" set +e sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration EXIT_CODE=$? 
echo "* Message queue status:" sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info if [ $EXIT_CODE -ne 0 ] ; then set +x echo "* Heat stack:" heat stack-show integration_test echo "* Alarm list:" ceilometer alarm-list echo "* Nova instance list:" openstack server list echo "* Gnocchi instance list:" gnocchi resource list -t instance for instance_id in $(openstack server list -f value -c ID); do echo "* Nova instance detail:" openstack server show $instance_id echo "* Gnocchi instance detail:" gnocchi resource show -t instance $instance_id echo "* Gnocchi measures for instance ${instance_id}:" gnocchi measures show -r $instance_id cpu_util done gnocchi status # Be sure to source Gnocchi settings before source $BASE/new/gnocchi/devstack/settings echo "* Unprocessed measures:" sudo find $GNOCCHI_DATA_DIR set -x fi set -e # Collect and parse result if [ -n "$CEILOMETER_DIR" ]; then generate_testr_results fi exit $EXIT_CODE ceilometer-6.0.0/ceilometer/tests/functional/0000775000567000056710000000000012701406364022445 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/test_collector.py0000664000567000056710000002235512701406223026045 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import socket import mock import msgpack from oslo_config import fixture as fixture_config import oslo_messaging from oslo_utils import timeutils from oslotest import mockpatch from stevedore import extension from ceilometer import collector from ceilometer import dispatcher from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests import base as tests_base class FakeException(Exception): pass class FakeConnection(object): def create_worker(self, topic, proxy, pool_name): pass class TestCollector(tests_base.BaseTestCase): def setUp(self): super(TestCollector, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.import_opt("connection", "oslo_db.options", group="database") self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override('telemetry_secret', 'not-so-secret', group='publisher') self._setup_messaging() self.counter = sample.Sample( name='foobar', type='bad', unit='F', volume=1, user_id='jd', project_id='ceilometer', resource_id='cat', timestamp=timeutils.utcnow().isoformat(), resource_metadata={}, ).as_dict() self.utf8_msg = utils.meter_message_from_counter( sample.Sample( name=u'test', type=sample.TYPE_CUMULATIVE, unit=u'', volume=1, user_id=u'test', project_id=u'test', resource_id=u'test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={u'name': [([u'TestPublish'])]}, source=u'testsource', ), 'not-so-secret') self.srv = collector.CollectorService() self.useFixture(mockpatch.PatchObject( self.srv.tg, 'add_thread', side_effect=self._dummy_thread_group_add_thread)) @staticmethod def _dummy_thread_group_add_thread(method): method() def _setup_messaging(self, enabled=True): if enabled: self.setup_messaging(self.CONF) else: self.useFixture(mockpatch.Patch( 'ceilometer.messaging.get_transport', return_value=None)) def _setup_fake_dispatcher(self): plugin = mock.MagicMock() fake_dispatcher = extension.ExtensionManager.make_test_instance([ extension.Extension('test', None, None, plugin,), ], propagate_map_exceptions=True) self.useFixture(mockpatch.Patch( 'ceilometer.dispatcher.load_dispatcher_manager', return_value=(fake_dispatcher, fake_dispatcher))) return plugin def _make_fake_socket(self, sample): def recvfrom(size): # Make the loop stop self.srv.stop() return msgpack.dumps(sample), ('127.0.0.1', 12345) sock = mock.Mock() sock.recvfrom = recvfrom return sock def _verify_udp_socket(self, udp_socket): conf = self.CONF.collector udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) udp_socket.bind.assert_called_once_with((conf.udp_address, conf.udp_port)) def test_record_metering_data(self): mock_dispatcher = self._setup_fake_dispatcher() dps = dispatcher.load_dispatcher_manager() (self.srv.meter_manager, self.srv.manager) = dps self.srv.record_metering_data(None, self.counter) mock_dispatcher.record_metering_data.assert_called_once_with( data=self.counter) def test_udp_receive_base(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() self.counter['source'] = 'mysource' self.counter['counter_name'] = self.counter['name'] self.counter['counter_volume'] = self.counter['volume'] self.counter['counter_type'] = self.counter['type'] self.counter['counter_unit'] = self.counter['unit'] udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket') as mock_socket: mock_socket.return_value = udp_socket self.srv.start() mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM) 
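        # beyond the socket family asserted above, the helper checks that
        # SO_REUSEADDR was set and that the socket was bound to the
        # configured collector address and port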
self._verify_udp_socket(udp_socket) mock_dispatcher.record_metering_data.assert_called_once_with( self.counter) def test_udp_socket_ipv6(self): self._setup_messaging(False) self.CONF.set_override('udp_address', '::1', group='collector') self._setup_fake_dispatcher() sock = self._make_fake_socket('data') with mock.patch.object(socket, 'socket') as mock_socket: mock_socket.return_value = sock self.srv.start() mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM) def test_udp_receive_storage_error(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() mock_dispatcher.record_metering_data.side_effect = self._raise_error self.counter['source'] = 'mysource' self.counter['counter_name'] = self.counter['name'] self.counter['counter_volume'] = self.counter['volume'] self.counter['counter_type'] = self.counter['type'] self.counter['counter_unit'] = self.counter['unit'] udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): self.srv.start() self._verify_udp_socket(udp_socket) mock_dispatcher.record_metering_data.assert_called_once_with( self.counter) @staticmethod def _raise_error(*args, **kwargs): raise Exception def test_udp_receive_bad_decoding(self): self._setup_messaging(False) self._setup_fake_dispatcher() udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): with mock.patch('msgpack.loads', self._raise_error): self.srv.start() self._verify_udp_socket(udp_socket) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start') @mock.patch.object(collector.CollectorService, 'start_udp') def test_only_udp(self, udp_start, rpc_start): """Check that only UDP is started if messaging transport is unset.""" self._setup_messaging(False) self._setup_fake_dispatcher() udp_socket = self._make_fake_socket(self.counter) with mock.patch('socket.socket', return_value=udp_socket): self.srv.start() self.assertEqual(0, rpc_start.call_count) self.assertEqual(1, udp_start.call_count) def test_udp_receive_valid_encoding(self): self._setup_messaging(False) mock_dispatcher = self._setup_fake_dispatcher() self.data_sent = [] with mock.patch('socket.socket', return_value=self._make_fake_socket(self.utf8_msg)): self.srv.start() self.assertTrue(utils.verify_signature( mock_dispatcher.method_calls[0][1][0], "not-so-secret")) def _test_collector_requeue(self, listener, batch_listener=False): mock_dispatcher = self._setup_fake_dispatcher() self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager() mock_dispatcher.record_metering_data.side_effect = Exception('boom') mock_dispatcher.record_events.side_effect = Exception('boom') self.srv.start() endp = getattr(self.srv, listener).dispatcher.endpoints[0] ret = endp.sample([{'ctxt': {}, 'publisher_id': 'pub_id', 'event_type': 'event', 'payload': {}, 'metadata': {}}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.Mock()) @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) def test_collector_sample_requeue(self): self._test_collector_requeue('sample_listener') @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.Mock()) @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock()) def test_collector_event_requeue(self): self.CONF.set_override('store_events', True, group='notification') self._test_collector_requeue('event_listener') 
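# NOTE: Editorial sketch, not part of the upstream module. The requeue
# tests above lean on oslo.messaging's contract that a notification
# endpoint may hand a batch back to the broker by returning REQUEUE.
# A minimal endpoint honouring that contract looks like this
# (oslo_messaging is already imported at the top of this module; the
# dispatch callable stands in for whatever records the batch, e.g. a
# dispatcher manager):


class RequeueOnErrorEndpoint(object):
    """Requeue sample batches whose dispatch raises."""

    def __init__(self, dispatch):
        self._dispatch = dispatch

    def sample(self, batch):
        try:
            self._dispatch(batch)
        except Exception:
            # Hand the messages back so another worker can retry them.
            return oslo_messaging.NotificationResult.REQUEUE
        return oslo_messaging.NotificationResult.HANDLED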
ceilometer-6.0.0/ceilometer/tests/functional/__init__.py0000664000567000056710000000000012701406223024536 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/test_notification.py0000664000567000056710000005766012701406223026554 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Ceilometer notify daemon.""" import shutil import mock from oslo_config import fixture as fixture_config from oslo_context import context import oslo_messaging import oslo_messaging.conffixture import oslo_service.service from oslo_utils import fileutils from oslo_utils import timeutils import six from stevedore import extension import yaml from ceilometer.compute.notifications import instance from ceilometer import messaging from ceilometer import notification from ceilometer.publisher import test as test_publisher from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } class TestNotification(tests_base.BaseTestCase): def setUp(self): super(TestNotification, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF.set_override("connection", "log://", group='database') self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", False, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") self.setup_messaging(self.CONF) self.srv = notification.NotificationService() def fake_get_notifications_manager(self, pm): 
self.plugin = instance.Instance(pm) return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, self.plugin) ] ) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.MagicMock()) @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') def _do_process_notification_manager_start(self, fake_event_endpoint_class): with mock.patch.object(self.srv, '_get_notifications_manager') as get_nm: get_nm.side_effect = self.fake_get_notifications_manager self.srv.start() self.fake_event_endpoint = fake_event_endpoint_class.return_value def test_start_multiple_listeners(self): urls = ["fake://vhost1", "fake://vhost2"] self.CONF.set_override("messaging_urls", urls, group="notification") self._do_process_notification_manager_start() self.assertEqual(2, len(self.srv.listeners)) def test_process_notification(self): self._do_process_notification_manager_start() self.srv.pipeline_manager.pipelines[0] = mock.MagicMock() self.plugin.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertTrue(self.srv.pipeline_manager.publisher.called) def test_process_notification_no_events(self): self._do_process_notification_manager_start() self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertNotEqual(self.fake_event_endpoint, self.srv.listeners[0].dispatcher.endpoints[0]) @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock()) def test_process_notification_with_events(self): self.CONF.set_override("store_events", True, group="notification") self._do_process_notification_manager_start() self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints)) self.assertEqual(self.fake_event_endpoint, self.srv.listeners[0].dispatcher.endpoints[0]) @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start', mock.MagicMock()) @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') def test_unique_consumers(self, fake_event_endpoint_class): def fake_get_notifications_manager_dup_targets(pm): plugin = instance.Instance(pm) return extension.ExtensionManager.make_test_instance( [extension.Extension('test', None, None, plugin), extension.Extension('test', None, None, plugin)]) with mock.patch.object(self.srv, '_get_notifications_manager') as get_nm: get_nm.side_effect = fake_get_notifications_manager_dup_targets self.srv.start() self.assertEqual(1, len(self.srv.listeners[0].dispatcher.targets)) class BaseRealNotification(tests_base.BaseTestCase): def setup_pipeline(self, counter_names): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': counter_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setup_event_pipeline(self, event_names): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': event_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': ['test://'] }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = 
fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") return ev_pipeline_cfg_file def setUp(self): super(BaseRealNotification, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf # Dummy config file to avoid looking for system config self.CONF([], project='ceilometer', validate_default_values=True) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.expected_samples = 2 self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", True, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) self.expected_events = 1 self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.CONF.set_override( "definitions_cfg_file", self.path_get('etc/ceilometer/event_definitions.yaml'), group='event') self.publisher = test_publisher.TestPublisher("") def _check_notification_service(self): self.srv.start() notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") notifier.info(context.RequestContext(), 'compute.instance.create.end', TEST_NOTICE_PAYLOAD) start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: if (len(self.publisher.samples) >= self.expected_samples and len(self.publisher.events) >= self.expected_events): break self.assertNotEqual(self.srv.listeners, self.srv.pipeline_listeners) self.srv.stop() resources = list(set(s.resource_id for s in self.publisher.samples)) self.assertEqual(self.expected_samples, len(self.publisher.samples)) self.assertEqual(self.expected_events, len(self.publisher.events)) self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) class TestRealNotificationReloadablePipeline(BaseRealNotification): def setUp(self): super(TestRealNotificationReloadablePipeline, self).setUp() self.CONF.set_override('refresh_pipeline_cfg', True) self.CONF.set_override('refresh_event_pipeline_cfg', True) self.CONF.set_override('pipeline_polling_interval', 1) self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_pipeline_poller(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.srv.tg = mock.MagicMock() self.srv.start() pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) self.assertIn(pipeline_poller_call, self.srv.tg.add_timer.call_args_list) self.srv.stop() def test_notification_reloaded_pipeline(self): pipeline_cfg_file = self.setup_pipeline(['instance']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.srv.start() pipeline = self.srv.pipe_manager # Modify the collection targets updated_pipeline_cfg_file = self.setup_pipeline(['vcpus', 'disk.root.size']) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) self.srv.refresh_pipeline() self.assertNotEqual(pipeline, self.srv.pipe_manager) self.srv.stop() def test_notification_reloaded_event_pipeline(self): ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.create.start']) self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.CONF.set_override("store_events", True, group="notification") self.srv.start() pipeline = self.srv.event_pipe_manager # Modify 
the collection targets updated_ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) # Move/re-name the updated pipeline file to the original pipeline # file path as recorded in oslo config shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file) self.srv.refresh_pipeline() self.assertNotEqual(pipeline, self.srv.pipe_manager) self.srv.stop() class TestRealNotification(BaseRealNotification): def setUp(self): super(TestRealNotification, self).setUp() self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service_error_topic(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.srv.start() notifier = messaging.get_notifier(self.transport, 'compute.vagrant-precise') notifier.error(context.RequestContext(), 'compute.instance.error', TEST_NOTICE_PAYLOAD) start = timeutils.utcnow() while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: if len(self.publisher.events) >= self.expected_events: break self.srv.stop() self.assertEqual(self.expected_events, len(self.publisher.events)) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_disable_non_metrics(self, fake_publisher_cls): self.CONF.set_override("disable_non_metric_meters", True, group="notification") # instance is a not a metric. we should only get back memory self.expected_samples = 1 fake_publisher_cls.return_value = self.publisher self._check_notification_service() self.assertEqual('memory', self.publisher.samples[0].name) @mock.patch.object(oslo_service.service.Service, 'stop') def test_notification_service_start_abnormal(self, mocked): try: self.srv.stop() except Exception: pass self.assertEqual(1, mocked.call_count) class TestRealNotificationHA(BaseRealNotification): def setUp(self): super(TestRealNotificationHA, self).setUp() self.CONF.set_override('workload_partitioning', True, group='notification') self.srv = notification.NotificationService() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() def test_reset_listeners_on_refresh(self): self.srv.start() listeners = self.srv.pipeline_listeners self.assertEqual(20, len(listeners)) self.srv._configure_pipeline_listeners() self.assertEqual(20, len(self.srv.pipeline_listeners)) for listener in listeners: self.assertNotIn(listeners, set(self.srv.pipeline_listeners)) self.srv.stop() def test_retain_common_listeners_on_refresh(self): with mock.patch('ceilometer.coordination.PartitionCoordinator' '.extract_my_subset', return_value=[1, 2]): self.srv.start() self.assertEqual(4, len(self.srv.pipeline_listeners)) listeners = [listener for listener in self.srv.pipeline_listeners] with mock.patch('ceilometer.coordination.PartitionCoordinator' '.extract_my_subset', return_value=[1, 3]): self.srv._refresh_agent(None) self.assertEqual(4, len(self.srv.pipeline_listeners)) for listener in listeners: if listener.dispatcher.targets[0].topic.endswith('1'): self.assertIn(listener, set(self.srv.pipeline_listeners)) else: self.assertNotIn(listener, set(self.srv.pipeline_listeners)) self.srv.stop() @mock.patch('oslo_messaging.Notifier.sample') def test_broadcast_to_relevant_pipes_only(self, mock_notifier): self.srv.start() 
for endpoint in self.srv.listeners[0].dispatcher.endpoints: if (hasattr(endpoint, 'filter_rule') and not endpoint.filter_rule.match(None, None, 'nonmatching.end', None, None)): continue endpoint.info([{ 'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'nonmatching.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertFalse(mock_notifier.called) for endpoint in self.srv.listeners[0].dispatcher.endpoints: if (hasattr(endpoint, 'filter_rule') and not endpoint.filter_rule.match(None, None, 'compute.instance.create.end', None, None)): continue endpoint.info([{ 'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertTrue(mock_notifier.called) self.assertEqual(3, mock_notifier.call_count) self.assertEqual('pipeline.event', mock_notifier.call_args_list[0][1]['event_type']) self.assertEqual('ceilometer.pipeline', mock_notifier.call_args_list[1][1]['event_type']) self.assertEqual('ceilometer.pipeline', mock_notifier.call_args_list[2][1]['event_type']) self.srv.stop() class TestRealNotificationMultipleAgents(tests_base.BaseTestCase): def setup_pipeline(self, transformers): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': ['instance', 'memory'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': transformers, 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setUp(self): super(TestRealNotificationMultipleAgents, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.CONF([], project='ceilometer', validate_default_values=True) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.CONF.set_override("backend_url", None, group="coordination") self.CONF.set_override("store_events", False, group="notification") self.CONF.set_override("disable_non_metric_meters", False, group="notification") self.CONF.set_override('workload_partitioning', True, group='notification') self.CONF.set_override('pipeline_processing_queues', 2, group='notification') self.publisher = test_publisher.TestPublisher("") self.publisher2 = test_publisher.TestPublisher("") def _check_notifications(self, fake_publisher_cls): fake_publisher_cls.side_effect = [self.publisher, self.publisher2] self.srv = notification.NotificationService() self.srv2 = notification.NotificationService() with mock.patch('ceilometer.coordination.PartitionCoordinator' '._get_members', return_value=['harry', 'lloyd']): with mock.patch('uuid.uuid4', return_value='harry'): self.srv.start() with mock.patch('uuid.uuid4', return_value='lloyd'): self.srv2.start() notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") payload1 = TEST_NOTICE_PAYLOAD.copy() payload1['instance_id'] = '0' notifier.info(context.RequestContext(), 'compute.instance.create.end', payload1) payload2 = TEST_NOTICE_PAYLOAD.copy() payload2['instance_id'] = '1' notifier.info(context.RequestContext(), 'compute.instance.create.end', payload2) self.expected_samples = 4 start = timeutils.utcnow() with mock.patch('six.moves.builtins.hash', lambda x: int(x)): while timeutils.delta_seconds(start, timeutils.utcnow()) < 60: if 
(len(self.publisher.samples + self.publisher2.samples) >= self.expected_samples): break self.srv.stop() self.srv2.stop() self.assertEqual(2, len(self.publisher.samples)) self.assertEqual(2, len(self.publisher2.samples)) self.assertEqual(1, len(set( s.resource_id for s in self.publisher.samples))) self.assertEqual(1, len(set( s.resource_id for s in self.publisher2.samples))) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_no_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline([]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_multiple_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }, { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) ceilometer-6.0.0/ceilometer/tests/functional/publisher/0000775000567000056710000000000012701406364024442 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/publisher/__init__.py0000664000567000056710000000000012701406223026533 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/publisher/test_direct.py0000664000567000056710000000651612701406224027330 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
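# NOTE: editorial sketch, not part of the upstream module. The tests in
# this file exercise the direct:// publisher, which bypasses the message
# bus and writes through the storage connection itself. Wired up by hand
# it looks roughly like this (the samples list is assumed to exist):
#
#   from oslo_utils import netutils
#   from ceilometer.publisher import direct
#
#   publisher = direct.DirectPublisher(netutils.urlsplit('direct://'))
#   # First argument is a request context (unused here); samples land in
#   # the backend named by the [database]/connection option.
#   publisher.publish_samples(None, samples)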
"""Tests for ceilometer/publisher/direct.py """ import datetime import uuid from oslo_utils import netutils from ceilometer.event.storage import models as event from ceilometer.publisher import direct from ceilometer import sample from ceilometer.tests import db as tests_db class TestDirectPublisher(tests_db.TestBase): resource_id = str(uuid.uuid4()) test_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def test_direct_publisher(self): """Test samples are saved.""" self.CONF.set_override('connection', self.db_manager.url, group='database') parsed_url = netutils.urlsplit('direct://') publisher = direct.DirectPublisher(parsed_url) publisher.publish_samples(None, self.test_data) meters = list(self.conn.get_meters(resource=self.resource_id)) names = sorted([meter.name for meter in meters]) self.assertEqual(3, len(meters), 'There should be 3 samples') self.assertEqual(['alpha', 'beta', 'gamma'], names) class TestEventDirectPublisher(tests_db.TestBase): test_data = [event.Event(message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5)] def test_direct_publisher(self): parsed_url = netutils.urlsplit('direct://') publisher = direct.DirectPublisher(parsed_url) publisher.publish_events(None, self.test_data) e_types = list(self.event_conn.get_event_types()) self.assertEqual(5, len(e_types)) self.assertEqual(['event_%d' % i for i in range(0, 5)], sorted(e_types)) ceilometer-6.0.0/ceilometer/tests/functional/gabbi/0000775000567000056710000000000012701406364023511 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/0000775000567000056710000000000012701406364025124 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/meters.yaml0000664000567000056710000002560712701406223027313 0ustar jenkinsjenkins00000000000000# # Tests to explore and cover the /v2/meters section of the # Ceilometer API. # fixtures: - ConfigFixture tests: # Generic HTTP health explorations of all meters. - name: empty meters list url: /v2/meters response_headers: content-type: /application/json/ response_strings: - "[]" - name: meters list bad accept url: /v2/meters request_headers: accept: text/plain status: 406 - name: meters list bad method url: /v2/meters method: POST status: 405 response_headers: allow: GET - name: try to delete meters url: /v2/meters method: DELETE status: 405 response_headers: allow: GET # Generic HTTP health explorations of single meter. 
- name: get non exist meter url: /v2/meters/noexist response_strings: - "[]" - name: meter bad accept url: /v2/meters/noexist?direct=True request_headers: accept: text/plain status: 406 - name: meter delete noexist url: /v2/meters/noexist method: DELETE status: "404 || 405" - name: post meter no data url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: "" status: 400 - name: post meter error is JSON url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: "" status: 400 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: "Samples should be included in request body" - name: post meter bad content-type url: /v2/meters/apples?direct=True method: POST request_headers: content-type: text/plain data: hello status: 415 - name: post bad samples to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: samples: - red - blue - yellow status: 400 # POST variations on a malformed sample - name: post limited counter to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_name" - name: post mismatched counter name to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: cars counter_type: gauge counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_name" - "should be apples" - name: post counter no resource to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 1 status: 400 response_strings: - "Invalid input for field/attribute resource_id" - "Mandatory field missing." - name: post counter bad type to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: elevation counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 400 response_strings: - "Invalid input for field/attribute counter_type." 
- "The counter type must be: gauge, delta, cumulative" # Manipulate samples - name: post counter to meter url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 1 resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 status: 201 - name: list apple samples url: /v2/meters/apples response_json_paths: $[0].counter_volume: 1.0 $[0].counter_name: apples $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - name: list meters url: /v2/meters response_json_paths: $[0].name: apples $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 $[0].type: gauge $[-1].name: apples - name: negative limit on meter list url: /v2/meters/apples?limit=-5 status: 400 response_strings: - Limit must be positive - name: nan limit on meter list url: /v2/meters/apples?limit=NaN status: 400 response_strings: - unable to convert to int - name: post counter to meter different resource url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 2 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - name: query for resource url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq response_json_paths: $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa # Explore posting samples with less than perfect data. - name: post counter with bad timestamp url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-bad 23:23:20" status: 400 response_strings: - 'Invalid input for field/attribute samples' - name: post counter with good timestamp url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" - name: post counter with wrong metadata url: /v2/meters/apples?direct=True method: POST request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" resource_metadata: "a string" status: 400 response_strings: - "Invalid input for field/attribute samples" - name: post counter with empty metadata url: /v2/meters/apples?direct=True method: POST status: 201 request_headers: content-type: application/json data: - counter_name: apples counter_type: gauge counter_unit: instance counter_volume: 3 resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa timestamp: "2013-01-01 23:23:20" resource_metadata: {} # Statistics - name: get sample statistics url: /v2/meters/apples/statistics response_json_paths: $[0].groupby: null $[0].unit: instance $[0].sum: 9.0 $[0].min: 1.0 $[0].max: 3.0 $[0].count: 4 - name: get incorrectly grouped sample statistics url: /v2/meters/apples/statistics?groupby=house_id status: 400 response_strings: - Invalid groupby fields - name: get grouped sample statistics url: /v2/meters/apples/statistics?groupby=resource_id response_json_paths: $[1].max: 3.0 $[0].max: 1.0 - name: get sample statistics bad period url: 
/v2/meters/apples/statistics?period=seven status: 400 response_strings: - unable to convert to int - name: get sample statistics negative period url: /v2/meters/apples/statistics?period=-7 status: 400 response_strings: - Period must be positive. - name: get sample statistics 600 period url: /v2/meters/apples/statistics?period=600 response_json_paths: $[0].period: 600 - name: get sample statistics time limit not time url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember status: 400 response_strings: - invalid timestamp format - name: get sample statistics time limit gt url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 response_json_paths: $[0].count: 2 - name: get sample statistics time limit lt url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 response_json_paths: $[0].count: 2 - name: get sample statistics time limit bounded url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 response_strings: - "[]" - name: get sample statistics select aggregate bad format url: /v2/meters/apples/statistics?aggregate=max status: 400 - name: get sample statistics select aggregate url: /v2/meters/apples/statistics?aggregate.func=max response_json_paths: $[0].aggregate.max: 3.0 # limit meters results - name: get meters ulimited url: /v2/meters response_json_paths: $.`len`: 2 - name: get meters limited url: /v2/meters?limit=1 response_json_paths: $.`len`: 1 - name: get meters double limit url: /v2/meters?limit=1&limit=1 status: 400 - name: get meters filter limit desc: expressing limit this way is now disallowed url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' - name: get meters filter limit and limit url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml0000664000567000056710000000070412701406223031133 0ustar jenkinsjenkins00000000000000# # Demonstrate a simple sample fixture. # fixtures: - ConfigFixture - SampleDataFixture tests: - name: get fixture samples desc: get all the samples at livestock url: /v2/meters/livestock response_json_paths: $.[0].counter_name: livestock $.[1].counter_name: livestock $.[2].counter_name: livestock $.[2].user_id: farmerjon $.[0].resource_metadata.breed: cow $.[1].resource_metadata.farmed_by: nancy ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/basic.yaml0000664000567000056710000000127712701406223027072 0ustar jenkinsjenkins00000000000000# # Some simple tests just to confirm that the system works. # fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. - name: quick root check url: / response_headers: content-type: application/json; charset=UTF-8 response_strings: - '"base": "application/json"' response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json # NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! 
- name: v2 visit desc: this demonstrates a bug in the info in / url: $RESPONSE['versions.values.[0].links.[0].href'] status: 404 ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml0000664000567000056710000000262212701406223031152 0ustar jenkinsjenkins00000000000000# # Explore and cover resources API with gabbi tests when there are no # resources. # fixtures: - ConfigFixture tests: # Check for a list of resources, modifying the request in various # ways. - name: list resources no extra desc: Provide no additional header guidelines url: /v2/resources response_headers: content-type: /application/json/ response_strings: - "[]" - name: list resources but get url wrong url: /v2/resrces status: 404 - name: list resources explicit accept url: /v2/resources request_headers: accept: application/json response_strings: - "[]" - name: list resources bad accept url: /v2/resources request_headers: accept: text/plain status: 406 - name: list resources with bad query field url: /v2/resources?q.field=id&q.value=cars status: 400 response_strings: - unrecognized field in query - name: list resources with query url: /v2/resources?q.field=resource&q.value=cars response_strings: - "[]" - name: list resource bad type meter links url: /v2/resources?meter_links=yes%20please status: 400 response_strings: - unable to convert to int - name: list resource meter links int url: /v2/resources?meter_links=0 response_strings: - "[]" ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml0000664000567000056710000001647512701406223032200 0ustar jenkinsjenkins00000000000000# These test run against the Events API with data preloaded into the datastore. fixtures: - ConfigFixture - EventDataFixture tests: # this attempts to get all the events and checks to make sure they are valid - name: get all events url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip $.[0].traits.[1].value: '0' $.[0].raw.nested.inside: value $.[1].event_type: cookies_peanut.butter $.[1].traits.[0].name: type $.[1].traits.[1].name: ate $.[1].raw.nested.inside: value $.[2].event_type: cookies_sugar $.[2].traits.[0].type: string $.[2].traits.[1].type: integer $.[2].raw.nested.inside: value # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params url: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects a matching event - name: get events that match query url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip # this attempts to query the events with the correct data query syntax and # expects a matching event - name: get events that match query via data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 
response_json_paths: $.[0].event_type: cookies_chocolate.chip $.[0].traits.[0].value: chocolate.chip # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # a bad field name and expects an empty list - name: get events that match bad query via data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via data list url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # Get a single event by message_id should return an event - name: get a single event url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.event_type: cookies_chocolate.chip $.traits.[0].value: chocolate.chip $.traits.[1].value: '0' # Get a single event by message_id no data is present so should return a 404 - name: get a single event that does not exist url: /v2/events/bad-id request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return a list of event types - name: get all event types url: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - cookies_chocolate.chip - cookies_peanut.butter - cookies_sugar # Get a single event type by valid name, this API is unused and should return a 404 - name: get event types for good event_type unused api url: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get a single event type by invalid name, this API is unused and should return a 404 - name: get event types for bad event_type unused api url: /v2/event_types/bad_event_type request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for a valid event type should return an list of traits - name: get all traits for event type url: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].type: string $.[1].name: ate # Get all traits for an invalid event type should return an empty list - name: get all traits names for event type bad event type url: /v2/event_types/bad_event_type/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; 
charset=UTF-8 response_strings: - "[]" # Get all traits of type ate for a valid event type should return an list of # traits - name: get all traits of type ate for event type url: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $.[0].name: ate $.[0].value: '0' # Get all traits of type ate for a invalid event type should return an empty # list - name: get all traits of type for event type bad event type url: /v2/event_types/bad_event_type/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get all traits of type bad_trait_name for a valid event type should return an # empty list - name: get all traits of type instances for event type bad trait name url: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/middleware.yaml0000664000567000056710000000210512701406223030115 0ustar jenkinsjenkins00000000000000# # Test the middlewares. Just CORS for now. # fixtures: - ConfigFixture - CORSConfigFixture tests: - name: valid cors options OPTIONS: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors options OPTIONS: / status: 200 request_headers: origin: http://invalid.example.com access-control-request-method: GET response_forbidden_headers: - access-control-allow-origin - name: valid cors get GET: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors get GET: / status: 200 request_headers: origin: http://invalid.example.com response_forbidden_headers: - access-control-allow-origin ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/samples.yaml0000664000567000056710000001053612701406223027453 0ustar jenkinsjenkins00000000000000# # Explore and test the samples controller, using samples supplied by # the SampleDataFixture. # fixtures: - ConfigFixture - SampleDataFixture tests: # Confirm all the samples are there and expected requests behave. # TODO(chdent): There's a danger here that the ordering of multiple # samples will not be consistent. 
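# NOTE: editorial aside, not an original test. Several entries below use
# gabbi's $RESPONSE substitution, which evaluates a JSONPath against the
# body of the immediately preceding response, e.g.:
#
# - name: fetch first listed sample by id
#   url: /v2/samples/$RESPONSE['$[0].id']
#
# so each test can derive its URL from the previous test's output.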
- name: lists samples url: /v2/samples response_headers: content-type: /application/json/ response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[1].metadata.breed: pig $[2].metadata.breed: sheep - name: get just one url: /v2/samples/$RESPONSE['$[0].id'] response_json_paths: $.meter: livestock $.metadata.breed: cow - name: list samples with limit url: /v2/samples?limit=1 response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[-1].metadata.breed: cow - name: list zero samples with zero limit url: /v2/samples?limit=0 status: 400 - name: list samples with query url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq response_json_paths: $[0].meter: livestock $[0].metadata.breed: cow $[-1].metadata.breed: cow - name: query by user url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq response_json_paths: $[0].user_id: $RESPONSE['$[0].user_id'] - name: query by user_id url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq response_json_paths: $[0].user_id: $RESPONSE['$[0].user_id'] - name: query by project url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq response_json_paths: $[0].project_id: $RESPONSE['$[0].project_id'] - name: query by project_id url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq response_json_paths: $[0].project_id: $RESPONSE['$[0].project_id'] # Explore failure modes for listing samples - name: list samples with bad field url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq status: 400 response_strings: - timestamp - project - unrecognized field in query - name: list samples with bad metaquery field url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq status: 400 response_strings: - unrecognized field in query - name: bad limit value url: /v2/samples?limit=happiness status: 400 response_strings: - Invalid input for field/attribute limit - name: negative limit value 400 url: /v2/samples?limit=-99 status: 400 - name: negative limit value error message url: /v2/samples?limit=-99 status: 400 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: Limit must be positive - name: bad accept desc: try an unexpected content type url: /v2/samples request_headers: accept: text/plain status: 406 - name: complex good accept desc: client sends complex accept do we adapt url: /v2/samples request_headers: accept: text/plain, application/json; q=0.8 - name: complex bad accept desc: client sends complex accept do we adapt url: /v2/samples request_headers: accept: text/plain, application/binary; q=0.8 status: 406 - name: bad method url: /v2/samples method: POST status: 405 response_headers: allow: GET # Work with just one sample. 
- name: list one of the samples url: /v2/samples?limit=1 - name: retrieve one sample url: /v2/samples/$RESPONSE['$[0].id'] response_headers: content-type: /application/json/ response_json_paths: $.meter: livestock - name: retrieve sample with useless query url: /v2/samples/$RESPONSE['$.id']?limit=5 status: 400 response_strings: - "Unknown argument:" - name: attempt missing sample url: /v2/samples/davesnothere status: 404 response_headers: content-type: /application/json/ response_json_paths: $.error_message.faultstring: Sample davesnothere Not Found ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml0000664000567000056710000001411512701406223031626 0ustar jenkinsjenkins00000000000000# These test run against the Events API with no data preloaded into the # datastore. This allows us to verify that requests are still processed # normally even if data is missing for that endpoint. fixtures: - ConfigFixture tests: # this attempts to get all the events and expects an empty list back - name: get all events url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to get all the events with no role/user/project # info in header and expects a 403 - name: get events with bad headers url: /v2/events status: 403 # this attempts to get all the events with no user/project # info in header and expects a 403 - name: get events with admin only header url: /v2/events request_headers: X-Roles: admin status: 403 # this attempts to get all the events with no project # info in header and expects a 403 - name: get events with no project header url: /v2/events request_headers: X-Roles: admin X-User-Id: user1 status: 403 # this attempts to get all the events with no user # info in header and expects a 403 - name: get events with no user header url: /v2/events request_headers: X-Roles: admin X-Project-Id: project1 status: 403 # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params url: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects an empty list - name: get events that match query url: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # expects an empty list - name: get events that match query via request data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query url: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the correct data 
query syntax and # a bad field name and expects an empty list - name: get events that match bad query via request data url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via request data malformed list url: /v2/events request_headers: content-type: application/json; charset=UTF-8 X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list along with a bad content-type. Should # return a 400 - name: get events that match bad query via request data wrong type url: /v2/events request_headers: content-type: text/plain X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: "field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True" status: 415 # Get a single event by message_id no data is present so should return a 404 - name: get a single event url: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return an empty list - name: get all event types url: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get a single event type by name, this API is unused and should return a 404 - name: get event types for good event_type unused api url: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for an event type should return an empty list - name: get all traits for event type url: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" # Get all traits named ate for an event type should return an empty list - name: get all traits named ate for event type url: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json; charset=UTF-8 response_strings: - "[]" ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml0000664000567000056710000000046012701406223030433 0ustar jenkinsjenkins00000000000000# # Explore the capabilities API # fixtures: - ConfigFixture tests: - name: get capabilities desc: retrieve capabilities for the mongo store url: /v2/capabilities response_json_paths: $.event_storage.['storage:production_ready']: true $.storage.['storage:production_ready']: true ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml0000664000567000056710000000477412701406223031660 0ustar jenkinsjenkins00000000000000# # Explore and cover resources API with gabbi tests when there are a # small number of pre-existing resources # fixtures: - ConfigFixture - SampleDataFixture tests: - name: list all resources url: 
/v2/resources response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: get one resource desc: get a resource via the links in the first resource listed above url: $RESPONSE['$[0].links[0].href'] response_json_paths: $.resource_id: $RESPONSE['$[0].resource_id'] - name: list resources limit user_id url: /v2/resources?q.field=user_id&q.value=farmerjon response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: list resources limit metadata url: /v2/resources?q.field=metadata.breed&q.value=sheep response_json_paths: $[0].user_id: farmerjon $[0].links[1].rel: livestock - name: list resources limit metadata no match url: /v2/resources?q.field=metadata.breed&q.value=llamma response_strings: - "[]" - name: fail to get one resource url: /v2/resources/nosirnothere status: 404 - name: list resource meter links present url: /v2/resources?meter_links=1 response_json_paths: $[0].links[0].rel: self $[0].links[1].rel: livestock $[0].links[-1].rel: livestock - name: list resource meter links not present url: /v2/resources?meter_links=0 desc: there is only one links entry when meter_links is 0 response_json_paths: $[0].links[0].rel: self $[0].links[-1].rel: self # limit resource results - name: get resources unlimited url: /v2/resources response_json_paths: $.`len`: 1 - name: get resources limited url: /v2/resources?limit=1 response_json_paths: $.`len`: 1 - name: get resources double limit url: /v2/resources?limit=1&limit=1 status: 400 - name: get resources filter limit desc: expressing limit this way is now disallowed url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' - name: get resources filter limit and limit url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 status: 400 response_strings: - 'Unknown argument: \"limit\": unrecognized field in query' ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml0000664000567000056710000000620012701406223030524 0ustar jenkinsjenkins00000000000000# Post a simple sample, sir, and then retrieve it in various ways. fixtures: - ConfigFixture tests: # POST one sample and verify its existence.
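# NOTE: editorial aside, not part of the original file. The direct=True
# query parameter used below asks the API to write samples straight to
# the configured storage instead of routing them through the notification
# bus, so these tests need no running collector. Outside the test harness
# the same request would look roughly like this sketch (host and port are
# assumptions; 8777 is the API port from the devstack settings):
#
#   curl -X POST 'http://localhost:8777/v2/meters/apples?direct=True' \
#        -H 'content-type: application/json' \
#        -d '[{"counter_name": "apples", "counter_type": "gauge",
#              "counter_unit": "instance", "counter_volume": 1,
#              "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36"}]'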
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml0000664000567000056710000000620012701406223030524 0ustar jenkinsjenkins00000000000000# Post a simple sample and then retrieve it in various ways.

fixtures:
    - ConfigFixture

tests:

# POST one sample and verify its existence.

    - name: post sample for meter
      desc: post a single sample
      url: /v2/meters/apples?direct=True
      method: POST
      request_headers:
          content-type: application/json
      data: |
          [
              {
                  "counter_name": "apples",
                  "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
                  "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
                  "counter_unit": "instance",
                  "counter_volume": 1,
                  "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
                  "resource_metadata": {
                      "name2": "value2",
                      "name1": "value1"
                  },
                  "counter_type": "gauge"
              }
          ]

      response_json_paths:
          $.[0].counter_name: apples
      status: 201
      response_headers:
          content-type: application/json; charset=UTF-8

# When POSTing a sample perhaps we should get back a location header
# with the URI of the posted sample

    - name: post a sample expect location
      desc: https://bugs.launchpad.net/ceilometer/+bug/1426426
      xfail: true
      url: /v2/meters/apples?direct=True
      method: POST
      request_headers:
          content-type: application/json
      data:
          - counter_name: apples
            project_id: 35b17138-b364-4e6a-a131-8f3099c5be68
            user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff
            counter_unit: instance
            counter_volume: 1
            resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
            resource_metadata:
                name2: value2
                name1: value1
            counter_type: gauge
      response_headers:
          location: /$SCHEME://$NETLOC/

# GET all the samples created for the apples meter

    - name: get samples for meter
      desc: get all the samples at that meter
      url: /v2/meters/apples
      response_json_paths:
          $.[0].counter_name: apples
          $.[0].counter_volume: 1
          $.[0].resource_metadata.name2: value2

# POSTing a sample to a meter will implicitly create a resource

    - name: get resources
      desc: get the resources that exist because of the sample
      url: /v2/resources
      response_json_paths:
          $.[0].metadata.name2: value2

# NOTE(chdent): We assume that the first item in links is self.
# Need to determine how to express the more correct JSONPath here
# (if possible).

    - name: get resource
      desc: get just one of those resources via self
      url: $RESPONSE['$[0].links[0].href']
      response_json_paths:
          $.metadata.name2: value2

# GET the created samples

    - name: get samples
      desc: get all the created samples
      url: /v2/samples
      response_json_paths:
          $.[0].metadata.name2: value2
          $.[0].meter: apples

    - name: get one sample
      desc: get the one sample that exists
      url: /v2/samples/$RESPONSE['$[0].id']
      response_json_paths:
          $.metadata.name2: value2
          $.meter: apples
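Posting a sample outside the gabbi harness follows the same shape. A minimal sketch, assuming a local API endpoint, the requests library, and direct=True to write straight to storage as the test above does:

import requests

sample = [{
    "counter_name": "apples",
    "counter_type": "gauge",
    "counter_unit": "instance",
    "counter_volume": 1,
    "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
    "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
    "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
    "resource_metadata": {"name1": "value1", "name2": "value2"},
}]
resp = requests.post('http://localhost:8777/v2/meters/apples',
                     params={'direct': 'True'}, json=sample)
# The gabbi test above expects a 201 on success.
assert resp.status_code == 201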
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbi_paste.ini0000664000567000056710000000143012701406223026442 0ustar jenkinsjenkins00000000000000# Ceilometer API WSGI Pipeline
# Define the filters that make up the pipeline for processing WSGI requests
# Note: This pipeline is PasteDeploy's term rather than Ceilometer's pipeline
# used for processing samples
#
# This version is specific for gabbi. It removes support for keystone while
# keeping support for CORS.

# Remove authtoken from the pipeline if you don't want to use keystone authentication
[pipeline:main]
pipeline = cors api-server

[app:api-server]
paste.app_factory = ceilometer.api.app:app_factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory

[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = ceilometer
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits_prefix/0000775000567000056710000000000012701406364026501 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml0000664000567000056710000000102212701406223030435 0ustar jenkinsjenkins00000000000000#
# Confirm root reports the right data including a prefixed URL
#

fixtures:
    - ConfigFixture

tests:

# Root gives us some information on where to go from here.

    - name: quick root check
      url: /
      response_headers:
          content-type: application/json; charset=UTF-8
      response_strings:
          - '"base": "application/json"'
          - /telemetry/
      response_json_paths:
          versions.values.[0].status: stable
          versions.values.[0].media-types.[0].base: application/json
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml0000664000567000056710000000114412701406223033221 0ustar jenkinsjenkins00000000000000#
# Explore and cover resources API with gabbi tests when there are a
# small number of pre-existing resources
#

fixtures:
    - ConfigFixture
    - SampleDataFixture

tests:
    - name: list all resources
      url: /v2/resources
      response_json_paths:
          $[0].user_id: farmerjon
          $[0].links[1].rel: livestock
      response_strings:
          - /telemetry/

    - name: get one resource
      desc: get a resource via the links in the first resource listed above
      url: $RESPONSE['$[0].links[0].href']
      response_json_paths:
          $.resource_id: $RESPONSE['$[0].resource_id']
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml0000664000567000056710000000273112701406223032106 0ustar jenkinsjenkins00000000000000# Post a simple sample and confirm the created resource has
# reasonable URLs

fixtures:
    - ConfigFixture

tests:

# POST one sample and verify its existence.

    - name: post sample for meter
      desc: post a single sample
      url: /v2/meters/apples?direct=True
      method: POST
      request_headers:
          content-type: application/json
      data: |
          [
              {
                  "counter_name": "apples",
                  "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
                  "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
                  "counter_unit": "instance",
                  "counter_volume": 1,
                  "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
                  "resource_metadata": {
                      "name2": "value2",
                      "name1": "value1"
                  },
                  "counter_type": "gauge"
              }
          ]

      response_json_paths:
          $.[0].counter_name: apples
      status: 201
      response_headers:
          content-type: application/json; charset=UTF-8

    - name: get resources
      desc: get the resources that exist because of the sample
      url: /v2/resources
      response_json_paths:
          $.[0].metadata.name2: value2

    - name: get resource
      desc: get just one of those resources via self
      url: $RESPONSE['$[0].links[0].href']
      response_json_paths:
          $.metadata.name2: value2
      response_strings:
          - /telemetry/
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/__init__.py0000664000567000056710000000000012701406223025602 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml0000664000567000056710000000067412701406223027327 0ustar jenkinsjenkins00000000000000# A limited pipeline for use with the Gabbi spike.
# Direct writes to the metering database without using an
# intermediary dispatcher.
#
# This is one of several things that will need some extensive
# tidying to be more right.
---
sources:
    - name: meter_source
      interval: 1
      meters:
          - "*"
      sinks:
          - meter_sink
sinks:
    - name: meter_sink
      transformers:
      publishers:
          - direct://
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/test_gabbi.py0000664000567000056710000000226512701406223026165 0ustar jenkinsjenkins00000000000000#
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A test module to exercise the Ceilometer API with gabbi

For the sake of exploratory development.
"""

import os

from gabbi import driver

from ceilometer.api import app
from ceilometer.tests.functional.gabbi import fixtures as fixture_module

TESTS_DIR = 'gabbits'


def load_tests(loader, tests, pattern):
    """Provide a TestSuite to the discovery process."""
    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    return driver.build_tests(test_dir, loader, host=None,
                              intercept=app.load_app,
                              fixture_module=fixture_module)
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py0000664000567000056710000000232212701406223027534 0ustar jenkinsjenkins00000000000000#
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A module to exercise the Ceilometer API with gabbi with a URL prefix"""

import os

from gabbi import driver

from ceilometer.api import app
from ceilometer.tests.functional.gabbi import fixtures as fixture_module

TESTS_DIR = 'gabbits_prefix'


def load_tests(loader, tests, pattern):
    """Provide a TestSuite to the discovery process."""
    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    return driver.build_tests(test_dir, loader, host=None,
                              prefix='/telemetry',
                              intercept=app.setup_app,
                              fixture_module=fixture_module)
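Both modules above run the YAML files against an in-process WSGI intercept. A hypothetical variant (not in the tree) could aim the same YAML at an already running API by passing a host and port to gabbi's build_tests instead of an intercept; a sketch, assuming gabbi's documented host/port parameters:

import os

from gabbi import driver

from ceilometer.tests.functional.gabbi import fixtures as fixture_module

TESTS_DIR = 'gabbits'


def load_tests(loader, tests, pattern):
    # Target a live endpoint rather than intercepting the app in-process.
    test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    return driver.build_tests(test_dir, loader,
                              host='127.0.0.1', port=8777,
                              fixture_module=fixture_module)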
ceilometer-6.0.0/ceilometer/tests/functional/gabbi/fixtures.py0000664000567000056710000001606612701406223025737 0ustar jenkinsjenkins00000000000000#
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures used during Gabbi-based test runs."""

import datetime
import os
import random
from unittest import case
import uuid

from gabbi import fixture
from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslo_policy import opts
from oslo_utils import fileutils
import six
from six.moves.urllib import parse as urlparse

from ceilometer.event.storage import models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage

# TODO(chdent): For now only MongoDB is supported, because of easy
# database name handling and intentional focus on the API, not the
# data store.
ENGINES = ['mongodb']


class ConfigFixture(fixture.GabbiFixture):
    """Establish the relevant configuration for a test run."""

    def start_fixture(self):
        """Set up config."""

        self.conf = None

        # Determine the database connection.
        db_url = os.environ.get('OVERTEST_URL', "sqlite://").replace(
            "mysql://", "mysql+pymysql://")
        if not db_url:
            raise case.SkipTest('No database connection configured')

        engine = urlparse.urlparse(db_url).scheme
        if engine not in ENGINES:
            raise case.SkipTest('Database engine not supported')

        conf = fixture_config.Config().conf
        self.conf = conf
        self.conf([], project='ceilometer', validate_default_values=True)
        opts.set_defaults(self.conf)
        conf.import_group('api', 'ceilometer.api.controllers.v2.root')
        conf.import_opt('store_events', 'ceilometer.notification',
                        group='notification')

        content = ('{"default": ""}')
        if six.PY3:
            content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='policy',
                                                    suffix='.json')

        conf.set_override("policy_file", self.tempfile,
                          group='oslo_policy')
        conf.set_override(
            'api_paste_config',
            os.path.abspath(
                'ceilometer/tests/functional/gabbi/gabbi_paste.ini')
        )

        # A special pipeline is required to use the direct publisher.
        conf.set_override(
            'pipeline_cfg_file',
            'ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml')

        database_name = '%s-%s' % (db_url, str(uuid.uuid4()))
        conf.set_override('connection', database_name, group='database')
        conf.set_override('metering_connection', '', group='database')
        conf.set_override('event_connection', '', group='database')

        conf.set_override('pecan_debug', True, group='api')
        conf.set_override('gnocchi_is_enabled', False, group='api')
        conf.set_override('aodh_is_enabled', False, group='api')

        conf.set_override('store_events', True, group='notification')

    def stop_fixture(self):
        """Reset the config and remove data."""
        if self.conf:
            storage.get_connection_from_config(self.conf).clear()
            self.conf.reset()


class SampleDataFixture(fixture.GabbiFixture):
    """Instantiate some sample data for use in testing."""

    def start_fixture(self):
        """Create some samples."""
        conf = fixture_config.Config().conf
        self.conn = storage.get_connection_from_config(conf)
        timestamp = datetime.datetime.utcnow()
        project_id = str(uuid.uuid4())
        self.source = str(uuid.uuid4())
        resource_metadata = {'farmed_by': 'nancy'}
        for name in ['cow', 'pig', 'sheep']:
            resource_metadata.update({'breed': name})
            c = sample.Sample(name='livestock',
                              type='gauge',
                              unit='head',
                              volume=int(10 * random.random()),
                              user_id='farmerjon',
                              project_id=project_id,
                              resource_id=project_id,
                              timestamp=timestamp,
                              resource_metadata=resource_metadata,
                              source=self.source)
            data = utils.meter_message_from_counter(
                c, conf.publisher.telemetry_secret)
            self.conn.record_metering_data(data)

    def stop_fixture(self):
        """Destroy the samples."""
        # NOTE(chdent): print here for sake of info during testing.
        # This will go away eventually.
        print('resource',
              self.conn.db.resource.remove({'source': self.source}))
        print('meter', self.conn.db.meter.remove({'source': self.source}))


class EventDataFixture(fixture.GabbiFixture):
    """Instantiate some sample event data for use in testing."""

    def start_fixture(self):
        """Create some events."""
        conf = fixture_config.Config().conf
        self.conn = storage.get_connection_from_config(conf, 'event')
        events = []
        name_list = ['chocolate.chip', 'peanut.butter', 'sugar']
        for ix, name in enumerate(name_list):
            timestamp = datetime.datetime.utcnow()
            message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix)
            traits = [models.Trait('type', 1, name),
                      models.Trait('ate', 2, ix)]
            event = models.Event(message_id,
                                 'cookies_{}'.format(name),
                                 timestamp,
                                 traits,
                                 {'nested': {'inside': 'value'}})
            events.append(event)
        self.conn.record_events(events)

    def stop_fixture(self):
        """Destroy the events."""
        self.conn.db.event.remove({'event_type': '/^cookies_/'})


class CORSConfigFixture(fixture.GabbiFixture):
    """Inject mock configuration for the CORS middleware."""

    def start_fixture(self):
        # Here we monkeypatch GroupAttr.__getattr__, necessary because the
        # paste.ini method of initializing this middleware creates its own
        # ConfigOpts instance, bypassing the regular config fixture.

        def _mock_getattr(instance, key):
            if key != 'allowed_origin':
                return self._original_call_method(instance, key)
            return "http://valid.example.com"

        self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__
        cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr

    def stop_fixture(self):
        """Remove the monkeypatch."""
        cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method
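Additional fixtures follow the same start_fixture/stop_fixture pairing and are named from YAML by class name. A minimal sketch of a hypothetical extra fixture (not in the tree) that pins utcnow so YAML tests could assert on stable timestamps, using the same libraries the module already imports plus mock:

import datetime

from gabbi import fixture
import mock
from oslo_utils import timeutils


class FrozenTimeFixture(fixture.GabbiFixture):
    """Pin timeutils.utcnow for the duration of a YAML file's tests."""

    def start_fixture(self):
        self.patcher = mock.patch.object(
            timeutils, 'utcnow',
            return_value=datetime.datetime(2015, 7, 2, 10, 39))
        self.patcher.start()

    def stop_fixture(self):
        self.patcher.stop()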
ceilometer-6.0.0/ceilometer/tests/functional/test_bin.py0000664000567000056710000001533312701406223024625 0ustar jenkinsjenkins00000000000000# Copyright 2012 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess

from oslo_utils import fileutils
import six

from ceilometer.tests import base


class BinTestCase(base.BaseTestCase):
    def setUp(self):
        super(BinTestCase, self).setUp()
        content = ("[DEFAULT]\n"
                   "rpc_backend=fake\n"
                   "[database]\n"
                   "connection=log://localhost\n")
        if six.PY3:
            content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='ceilometer',
                                                    suffix='.conf')

    def tearDown(self):
        super(BinTestCase, self).tearDown()
        os.remove(self.tempfile)

    def test_dbsync_run(self):
        subp = subprocess.Popen(['ceilometer-dbsync',
                                 "--config-file=%s" % self.tempfile])
        self.assertEqual(0, subp.wait())

    def test_run_expirer_ttl_disabled(self):
        subp = subprocess.Popen(['ceilometer-expirer',
                                 '-d',
                                 "--config-file=%s" % self.tempfile],
                                stderr=subprocess.PIPE)
        __, err = subp.communicate()
        self.assertEqual(0, subp.poll())
        self.assertIn(b"Nothing to clean, database metering "
                      b"time to live is disabled", err)
        self.assertIn(b"Nothing to clean, database event "
                      b"time to live is disabled", err)

    def _test_run_expirer_ttl_enabled(self, ttl_name, data_name):
        content = ("[DEFAULT]\n"
                   "rpc_backend=fake\n"
                   "[database]\n"
                   "%s=1\n"
                   "connection=log://localhost\n" % ttl_name)
        if six.PY3:
            content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='ceilometer',
                                                    suffix='.conf')
        subp = subprocess.Popen(['ceilometer-expirer',
                                 '-d',
                                 "--config-file=%s" % self.tempfile],
                                stderr=subprocess.PIPE)
        __, err = subp.communicate()
        self.assertEqual(0, subp.poll())
        msg = "Dropping %s data with TTL 1" % data_name
        if six.PY3:
            msg = msg.encode('utf-8')
        self.assertIn(msg, err)

    def test_run_expirer_ttl_enabled(self):
        self._test_run_expirer_ttl_enabled('metering_time_to_live',
                                           'metering')
        self._test_run_expirer_ttl_enabled('time_to_live', 'metering')
        self._test_run_expirer_ttl_enabled('event_time_to_live', 'event')


class BinSendSampleTestCase(base.BaseTestCase):
    def setUp(self):
        super(BinSendSampleTestCase, self).setUp()
        pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml')
        content = ("[DEFAULT]\n"
                   "rpc_backend=fake\n"
                   "pipeline_cfg_file={0}\n".format(pipeline_cfg_file))
        if six.PY3:
            content = content.encode('utf-8')

        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='ceilometer',
                                                    suffix='.conf')

    def tearDown(self):
        super(BinSendSampleTestCase, self).tearDown()
        os.remove(self.tempfile)

    def test_send_counter_run(self):
        subp = subprocess.Popen(['ceilometer-send-sample',
                                 "--config-file=%s" % self.tempfile,
                                 "--sample-resource=someuuid",
                                 "--sample-name=mycounter"])
        self.assertEqual(0, subp.wait())


class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
    def setUp(self):
        super(BinCeilometerPollingServiceTestCase, self).setUp()
        self.tempfile = None
        self.subp = None

    def tearDown(self):
        if self.subp:
            try:
                self.subp.kill()
            except OSError:
                pass
        os.remove(self.tempfile)
        super(BinCeilometerPollingServiceTestCase, self).tearDown()

    def test_starting_with_duplication_namespaces(self):
        content = ("[DEFAULT]\n"
                   "rpc_backend=fake\n"
                   "[database]\n"
                   "connection=log://localhost\n")
        if six.PY3:
            content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='ceilometer',
                                                    suffix='.conf')
        self.subp = subprocess.Popen(['ceilometer-polling',
                                      "--config-file=%s" % self.tempfile,
                                      "--polling-namespaces",
                                      "compute",
                                      "compute"],
                                     stderr=subprocess.PIPE)
        out = self.subp.stderr.read(1024)
        self.assertIn(b'Duplicated values: [\'compute\', \'compute\'] '
                      b'found in CLI options, auto de-duplicated', out)

    def test_polling_namespaces_invalid_value_in_config(self):
        content = ("[DEFAULT]\n"
                   "rpc_backend=fake\n"
                   "polling_namespaces = ['central']\n"
                   "[database]\n"
                   "connection=log://localhost\n")
        if six.PY3:
            content = content.encode('utf-8')
        self.tempfile = fileutils.write_to_tempfile(content=content,
                                                    prefix='ceilometer',
                                                    suffix='.conf')
        self.subp = subprocess.Popen(
            ["ceilometer-polling", "--config-file=%s" % self.tempfile],
            stderr=subprocess.PIPE)
        __, err = self.subp.communicate()
        expected = ("Exception: Valid values are ['compute', 'central', "
                    "'ipmi'], but found [\"['central']\"]")
        self.assertIn(expected, err)
ceilometer-6.0.0/ceilometer/tests/functional/storage/0000775000567000056710000000000012701406364024111 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/storage/test_impl_log.py0000664000567000056710000000204612701406223027320 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_log.py
"""
from oslotest import base

from ceilometer.storage import impl_log


class ConnectionTest(base.BaseTestCase):
    @staticmethod
    def test_get_connection():
        conn = impl_log.Connection(None)
        conn.record_metering_data({'counter_name': 'test',
                                   'resource_id': __name__,
                                   'counter_volume': 1,
                                   })
ceilometer-6.0.0/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py0000664000567000056710000002140212701406223030676 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_sqlalchemy.py

.. note::
  In order to run the tests against a real SQL server set the environment
  variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
  the tests.

"""

import datetime
import warnings

import mock
from oslo_db import exception
from oslo_utils import timeutils
from six.moves import reprlib

from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event
from ceilometer.event.storage import models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.storage import impl_sqlalchemy
from ceilometer.storage.sqlalchemy import models as sql_models
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.storage \
    import test_storage_scenarios as scenarios


@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class CeilometerBaseTest(tests_db.TestBase):

    def test_ceilometer_base(self):
        base = sql_models.CeilometerBase()
        base['key'] = 'value'
        self.assertEqual('value', base['key'])


@tests_db.run_with('sqlite')
class EngineFacadeTest(tests_db.TestBase):

    @mock.patch.object(warnings, 'warn')
    def test_no_not_supported_warning(self, mocked):
        impl_sqlalchemy.Connection('sqlite://')
        impl_sqla_event.Connection('sqlite://')
        self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning),
                         mocked.call_args_list)


@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTypeTest(tests_db.TestBase):
    # EventType is a construct specific to sqlalchemy
    # Not applicable to other drivers.

    def test_event_type_exists(self):
        et1 = self.event_conn._get_or_create_event_type("foo")
        self.assertTrue(et1.id >= 0)
        et2 = self.event_conn._get_or_create_event_type("foo")
        self.assertEqual(et2.id, et1.id)
        self.assertEqual(et2.desc, et1.desc)

    def test_event_type_unique(self):
        et1 = self.event_conn._get_or_create_event_type("foo")
        self.assertTrue(et1.id >= 0)
        et2 = self.event_conn._get_or_create_event_type("blah")
        self.assertNotEqual(et1.id, et2.id)
        self.assertNotEqual(et1.desc, et2.desc)
        # Test the method __repr__ returns a string
        self.assertTrue(reprlib.repr(et2))


@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class EventTest(tests_db.TestBase):
    def _verify_data(self, trait, trait_table):
        now = datetime.datetime.utcnow()
        ev = models.Event('1', 'name', now, [trait], {})
        self.event_conn.record_events([ev])
        session = self.event_conn._engine_facade.get_session()
        t_tables = [sql_models.TraitText, sql_models.TraitFloat,
                    sql_models.TraitInt, sql_models.TraitDatetime]
        for table in t_tables:
            if table == trait_table:
                self.assertEqual(1, session.query(table).count())
            else:
                self.assertEqual(0, session.query(table).count())

    def test_string_traits(self):
        model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text")
        self._verify_data(model, sql_models.TraitText)

    def test_int_traits(self):
        model = models.Trait("Foo", models.Trait.INT_TYPE, 100)
        self._verify_data(model, sql_models.TraitInt)

    def test_float_traits(self):
        model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456)
        self._verify_data(model, sql_models.TraitFloat)

    def test_datetime_traits(self):
        now = datetime.datetime.utcnow()
        model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now)
        self._verify_data(model, sql_models.TraitDatetime)

    def test_event_repr(self):
        ev = sql_models.Event('msg_id', None, False, {})
        ev.id = 100
        self.assertTrue(reprlib.repr(ev))


@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class RelationshipTest(scenarios.DBTestBase):
    # Note: Do not derive from SQLAlchemyEngineTestBase, since we
    # don't want to automatically inherit all the Meter setup.

    @mock.patch.object(timeutils, 'utcnow')
    def test_clear_metering_data_meta_tables(self, mock_utcnow):
        mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
        self.conn.clear_expired_metering_data(3 * 60)

        session = self.conn._engine_facade.get_session()
        self.assertEqual(5, session.query(sql_models.Sample).count())

        resource_ids = (session.query(sql_models.Resource.internal_id)
                        .group_by(sql_models.Resource.internal_id))
        meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
                       sql_models.MetaBigInt, sql_models.MetaBool]
        s = set()
        for table in meta_tables:
            self.assertEqual(0, (session.query(table)
                                 .filter(~table.id.in_(resource_ids)).count()
                                 ))
            s.update(session.query(table.id).all())
        self.assertEqual(set(resource_ids.all()), s)


class CapabilitiesTest(test_base.BaseTestCase):
    # Check the returned capabilities list, which is specific to each DB
    # driver

    def test_capabilities(self):
        expected_capabilities = {
            'meters': {'query': {'simple': True,
                                 'metadata': True,
                                 'complex': False}},
            'resources': {'query': {'simple': True,
                                    'metadata': True,
                                    'complex': False}},
            'samples': {'query': {'simple': True,
                                  'metadata': True,
                                  'complex': True}},
            'statistics': {'groupby': True,
                           'query': {'simple': True,
                                     'metadata': True,
                                     'complex': False},
                           'aggregation': {'standard': True,
                                           'selectable': {
                                               'max': True,
                                               'min': True,
                                               'sum': True,
                                               'avg': True,
                                               'count': True,
                                               'stddev': True,
                                               'cardinality': True}}
                           },
        }

        actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
        self.assertEqual(expected_capabilities, actual_capabilities)

    def test_event_capabilities(self):
        expected_capabilities = {
            'events': {'query': {'simple': True}},
        }
        actual_capabilities = impl_sqla_event.Connection.get_capabilities()
        self.assertEqual(expected_capabilities, actual_capabilities)

    def test_storage_capabilities(self):
        expected_capabilities = {
            'storage': {'production_ready': True},
        }
        actual_capabilities = (impl_sqlalchemy.
                               Connection.get_storage_capabilities())
        self.assertEqual(expected_capabilities, actual_capabilities)


@tests_db.run_with('sqlite', 'mysql', 'pgsql')
class FilterQueryTestForMeters(scenarios.DBTestBase):
    def prepare_data(self):
        self.counters = []
        c = sample.Sample(
            'volume.size',
            'gauge',
            'GiB',
            5,
            user_id=None,
            project_id=None,
            resource_id='fake_id',
            timestamp=datetime.datetime(2012, 9, 25, 10, 30),
            resource_metadata={'display_name': 'test-volume',
                               'tag': 'self.counter',
                               },
            source='test',
        )

        self.counters.append(c)
        msg = utils.meter_message_from_counter(
            c, secret='not-so-secret')
        self.conn.record_metering_data(msg)

    def test_get_meters_by_user(self):
        meters = list(self.conn.get_meters(user='None'))
        self.assertEqual(1, len(meters))

    def test_get_meters_by_project(self):
        meters = list(self.conn.get_meters(project='None'))
        self.assertEqual(1, len(meters))
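As the note at the top of test_impl_sqlalchemy.py says, the tests default to in-memory backends unless CEILOMETER_TEST_SQL_URL is exported. A minimal sketch of pointing a run at a real server (the URL here is an example value, not a requirement):

import os

# Must be set before the test run starts; the credentials and database
# name below are placeholders.
os.environ.setdefault(
    'CEILOMETER_TEST_SQL_URL',
    'mysql+pymysql://ceilometer:secret@127.0.0.1/ceilometer_test')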
ceilometer-6.0.0/ceilometer/tests/functional/storage/test_impl_mongodb.py0000664000567000056710000001262112701406223030164 0ustar jenkinsjenkins00000000000000#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_mongodb.py

.. note::
  In order to run the tests against another MongoDB server set the
  environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB
  server before running the tests.

"""

from ceilometer.event.storage import impl_mongodb as impl_mongodb_event
from ceilometer.storage import impl_mongodb
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db


@tests_db.run_with('mongodb')
class MongoDBConnection(tests_db.TestBase):
    def test_connection_pooling(self):
        test_conn = impl_mongodb.Connection(self.db_manager.url)
        self.assertEqual(self.conn.conn, test_conn.conn)

    def test_replica_set(self):
        url = self.db_manager._url + '?replicaSet=foobar'
        conn = impl_mongodb.Connection(url)
        self.assertTrue(conn.conn)


@tests_db.run_with('mongodb')
class IndexTest(tests_db.TestBase):
    def _test_ttl_index_absent(self, conn, coll_name, ttl_opt):
        # create a fake index and check it is deleted
        coll = getattr(conn.db, coll_name)
        index_name = '%s_ttl' % coll_name
        self.CONF.set_override(ttl_opt, -1, group='database')
        conn.upgrade()
        self.assertNotIn(index_name, coll.index_information())

        self.CONF.set_override(ttl_opt, 456789, group='database')
        conn.upgrade()
        self.assertEqual(456789,
                         coll.index_information()
                         [index_name]['expireAfterSeconds'])

    def test_meter_ttl_index_absent(self):
        self._test_ttl_index_absent(self.conn, 'meter',
                                    'metering_time_to_live')

    def test_event_ttl_index_absent(self):
        self._test_ttl_index_absent(self.event_conn, 'event',
                                    'event_time_to_live')

    def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
        coll = getattr(conn.db, coll_name)
        self.CONF.set_override(ttl_opt, 456789, group='database')
        conn.upgrade()
        index_name = '%s_ttl' % coll_name
        self.assertEqual(456789,
                         coll.index_information()
                         [index_name]['expireAfterSeconds'])

        self.CONF.set_override(ttl_opt, -1, group='database')
        conn.upgrade()
        self.assertNotIn(index_name, coll.index_information())

    def test_meter_ttl_index_present(self):
        self._test_ttl_index_present(self.conn, 'meter',
                                     'metering_time_to_live')

    def test_event_ttl_index_present(self):
        self._test_ttl_index_present(self.event_conn, 'event',
                                     'event_time_to_live')
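The TTL behaviour exercised by IndexTest is plain MongoDB TTL indexing. A minimal sketch of the underlying mechanism using pymongo directly (connection string and collection name here are illustrative, not taken from the tests):

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')
coll = client.ceilometer_test.meter
# Documents whose 'timestamp' field is older than expireAfterSeconds
# are reaped by the server's TTL monitor.
coll.create_index('timestamp', name='meter_ttl',
                  expireAfterSeconds=456789)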
class CapabilitiesTest(test_base.BaseTestCase):
    # Check the returned capabilities list, which is specific to each DB
    # driver

    def test_capabilities(self):
        expected_capabilities = {
            'meters': {'query': {'simple': True,
                                 'metadata': True,
                                 'complex': False}},
            'resources': {'query': {'simple': True,
                                    'metadata': True,
                                    'complex': False}},
            'samples': {'query': {'simple': True,
                                  'metadata': True,
                                  'complex': True}},
            'statistics': {'groupby': True,
                           'query': {'simple': True,
                                     'metadata': True,
                                     'complex': False},
                           'aggregation': {'standard': True,
                                           'selectable': {
                                               'max': True,
                                               'min': True,
                                               'sum': True,
                                               'avg': True,
                                               'count': True,
                                               'stddev': True,
                                               'cardinality': True}}
                           },
        }

        actual_capabilities = impl_mongodb.Connection.get_capabilities()
        self.assertEqual(expected_capabilities, actual_capabilities)

    def test_event_capabilities(self):
        expected_capabilities = {
            'events': {'query': {'simple': True}},
        }
        actual_capabilities = impl_mongodb_event.Connection.get_capabilities()
        self.assertEqual(expected_capabilities, actual_capabilities)

    def test_storage_capabilities(self):
        expected_capabilities = {
            'storage': {'production_ready': True},
        }
        actual_capabilities = (impl_mongodb.Connection.
                               get_storage_capabilities())
        self.assertEqual(expected_capabilities, actual_capabilities)
ceilometer-6.0.0/ceilometer/tests/functional/storage/test_storage_scenarios.py0000664000567000056710000044105212701406223031234 0ustar jenkinsjenkins00000000000000#
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Base classes for DB backend implementation test
"""

import datetime
import operator

import mock
from oslo_config import cfg
from oslo_db import api
from oslo_db import exception as dbexc
from oslo_utils import timeutils
import pymongo

import ceilometer
from ceilometer.event.storage import models as event_models
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage
from ceilometer.tests import db as tests_db


class DBTestBase(tests_db.TestBase):
    @staticmethod
    def create_side_effect(method, exception_type, test_exception):
        def side_effect(*args, **kwargs):
            if test_exception.pop():
                raise exception_type
            else:
                return method(*args, **kwargs)
        return side_effect
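    # Example usage (a sketch, mirroring the deadlock tests further down):
    # wrap a storage method so its first call raises and the retried call
    # goes through. Note the flags are popped from the end of the list, so
    # [False, True] means "raise, then succeed".
    #
    #     raise_deadlock = [False, True]
    #     patched = mock.patch.object(self.conn, "_create_resource")
    #     patched.side_effect = self.create_side_effect(
    #         self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock)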
    def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(),
                                metadata=None,
                                name='instance',
                                sample_type=sample.TYPE_CUMULATIVE, unit='',
                                volume=1, user_id='user-id',
                                project_id='project-id',
                                resource_id='resource-id', source=None):
        metadata = metadata or {'display_name': 'test-server',
                                'tag': 'self.counter'}
        s = sample.Sample(
            name, sample_type, unit=unit, volume=volume, user_id=user_id,
            project_id=project_id, resource_id=resource_id,
            timestamp=timestamp,
            resource_metadata=metadata, source=source
        )
        msg = utils.meter_message_from_counter(
            s, self.CONF.publisher.telemetry_secret
        )
        self.conn.record_metering_data(msg)
        return msg

    def setUp(self):
        super(DBTestBase, self).setUp()
        patcher = mock.patch.object(timeutils, 'utcnow')
        self.addCleanup(patcher.stop)
        self.mock_utcnow = patcher.start()
        self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39)
        self.prepare_data()

    def prepare_data(self):
        original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41),
                               (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42),
                               (2012, 7, 2, 10, 43)]
        timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44),
                                                     (2011, 5, 30, 18, 3),
                                                     (2012, 12, 1, 1, 25),
                                                     (2012, 2, 29, 6, 59),
                                                     (2013, 5, 31, 23, 7)]
        timestamp_list = (original_timestamps +
                          timestamps_for_test_samples_default_order)

        self.msgs = []

        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(2012, 7, 2, 10, 39),
            source='test-1')
        )
        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(*timestamp_list[0]),
            source='test-1')
        )
        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(*timestamp_list[1]),
            resource_id='resource-id-alternate',
            metadata={'display_name': 'test-server', 'tag': 'self.counter2'},
            source='test-2')
        )
        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(*timestamp_list[2]),
            resource_id='resource-id-alternate',
            user_id='user-id-alternate',
            metadata={'display_name': 'test-server', 'tag': 'self.counter3'},
            source='test-3')
        )

        start_idx = 3
        end_idx = len(timestamp_list)
        for i, ts in zip(range(start_idx - 1, end_idx - 1),
                         timestamp_list[start_idx:end_idx]):
            self.msgs.append(
                self.create_and_store_sample(
                    timestamp=datetime.datetime(*ts),
                    user_id='user-id-%s' % i,
                    project_id='project-id-%s' % i,
                    resource_id='resource-id-%s' % i,
                    metadata={
                        'display_name': 'test-server',
                        'tag': 'counter-%s' % i
                    },
                    source='test')
            )


class ResourceTest(DBTestBase):
    def prepare_data(self):
        super(ResourceTest, self).prepare_data()

        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(2012, 7, 2, 10, 39),
            user_id='mongodb_test',
            resource_id='resource-id-mongo_bad_key',
            project_id='project-id-test',
            metadata={'display.name': {'name.$1': 'test-server1',
                                       '$name_2': 'test-server2'},
                      'tag': 'self.counter'},
            source='test-4'
        ))

    def test_get_resources(self):
        expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10,
                                                            39)
        expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10,
                                                           40)
        msgs_sources = [msg['source'] for msg in self.msgs]
        resources = list(self.conn.get_resources())
        self.assertEqual(10, len(resources))
        for resource in resources:
            if resource.resource_id != 'resource-id':
                continue
            self.assertEqual(expected_first_sample_timestamp,
                             resource.first_sample_timestamp)
            self.assertEqual(expected_last_sample_timestamp,
                             resource.last_sample_timestamp)
            self.assertEqual('resource-id', resource.resource_id)
            self.assertEqual('project-id', resource.project_id)
            self.assertIn(resource.source, msgs_sources)
            self.assertEqual('user-id', resource.user_id)
            self.assertEqual('test-server',
                             resource.metadata['display_name'])
            break
        else:
            self.fail('Never found resource-id')

    def test_get_resources_start_timestamp(self):
        timestamp = datetime.datetime(2012, 7, 2, 10, 42)
        expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4',
                        'resource-id-6', 'resource-id-8'])

        resources = list(self.conn.get_resources(start_timestamp=timestamp))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(expected, set(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=timestamp,
                                                 start_timestamp_op='ge'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(expected, set(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=timestamp,
                                                 start_timestamp_op='gt'))
        resource_ids = [r.resource_id for r in resources]
        expected.remove('resource-id-2')
        self.assertEqual(expected, set(resource_ids))

    def test_get_resources_end_timestamp(self):
        timestamp = datetime.datetime(2012, 7, 2, 10, 42)
        expected = set(['resource-id', 'resource-id-alternate',
                        'resource-id-5', 'resource-id-7',
                        'resource-id-mongo_bad_key'])

        resources = list(self.conn.get_resources(end_timestamp=timestamp))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(expected, set(resource_ids))

        resources = list(self.conn.get_resources(end_timestamp=timestamp,
                                                 end_timestamp_op='lt'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(expected, set(resource_ids))

        resources = list(self.conn.get_resources(end_timestamp=timestamp,
                                                 end_timestamp_op='le'))
        resource_ids = [r.resource_id for r in resources]
        expected.add('resource-id-2')
        self.assertEqual(expected, set(resource_ids))

    def test_get_resources_both_timestamps(self):
        start_ts = datetime.datetime(2012, 7, 2, 10, 42)
        end_ts = datetime.datetime(2012, 7, 2, 10, 43)

        resources = list(self.conn.get_resources(start_timestamp=start_ts,
                                                 end_timestamp=end_ts))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(set(['resource-id-2']), set(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=start_ts,
                                                 end_timestamp=end_ts,
                                                 start_timestamp_op='ge',
                                                 end_timestamp_op='lt'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(set(['resource-id-2']), set(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=start_ts,
                                                 end_timestamp=end_ts,
                                                 start_timestamp_op='gt',
                                                 end_timestamp_op='lt'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(0, len(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=start_ts,
                                                 end_timestamp=end_ts,
                                                 start_timestamp_op='gt',
                                                 end_timestamp_op='le'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(set(['resource-id-3']), set(resource_ids))

        resources = list(self.conn.get_resources(start_timestamp=start_ts,
                                                 end_timestamp=end_ts,
                                                 start_timestamp_op='ge',
                                                 end_timestamp_op='le'))
        resource_ids = [r.resource_id for r in resources]
        self.assertEqual(set(['resource-id-2', 'resource-id-3']),
                         set(resource_ids))

    def test_get_resources_by_source(self):
        resources = list(self.conn.get_resources(source='test-1'))
        self.assertEqual(1, len(resources))
        ids = set(r.resource_id for r in resources)
        self.assertEqual(set(['resource-id']), ids)

    def test_get_resources_by_user(self):
        resources = list(self.conn.get_resources(user='user-id'))
        self.assertTrue(len(resources) == 2 or len(resources) == 1)
        ids = set(r.resource_id for r in resources)
        # tolerate storage driver only reporting latest owner of resource
        resources_ever_owned_by = set(['resource-id',
                                       'resource-id-alternate'])
        resources_now_owned_by = set(['resource-id'])
        self.assertTrue(ids == resources_ever_owned_by or
                        ids == resources_now_owned_by,
                        'unexpected resources: %s' % ids)

    def test_get_resources_by_alternate_user(self):
        resources = list(self.conn.get_resources(user='user-id-alternate'))
        self.assertEqual(1, len(resources))
        # only a single resource owned by this user ever
        self.assertEqual('resource-id-alternate', resources[0].resource_id)

    def test_get_resources_by_project(self):
        resources = list(self.conn.get_resources(project='project-id'))
        self.assertEqual(2, len(resources))
        ids = set(r.resource_id for r in resources)
        self.assertEqual(set(['resource-id', 'resource-id-alternate']), ids)

    def test_get_resources_by_metaquery(self):
        q = {'metadata.display_name': 'test-server'}
        resources = list(self.conn.get_resources(metaquery=q))
        self.assertEqual(9, len(resources))

    def test_get_resources_by_metaquery_key_with_dot_in_metadata(self):
        q = {'metadata.display.name.$name_2': 'test-server2',
             'metadata.display.name.name.$1': 'test-server1'}
        resources = list(self.conn.get_resources(metaquery=q))
        self.assertEqual(1, len(resources))

    def test_get_resources_by_empty_metaquery(self):
        resources = list(self.conn.get_resources(metaquery={}))
        self.assertEqual(10, len(resources))

    def test_get_resources_most_recent_metadata_all(self):
        resources = self.conn.get_resources()
        expected_tags = ['self.counter', 'self.counter3', 'counter-2',
                         'counter-3', 'counter-4', 'counter-5', 'counter-6',
                         'counter-7', 'counter-8']
        for resource in resources:
            self.assertIn(resource.metadata['tag'], expected_tags)

    def test_get_resources_most_recent_metadata_single(self):
        resource = list(
            self.conn.get_resources(resource='resource-id-alternate')
        )[0]
        expected_tag = 'self.counter3'
        self.assertEqual(expected_tag, resource.metadata['tag'])


class ResourceTestOrdering(DBTestBase):
    def prepare_data(self):
        sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43),
                                             (2013, 8, 10, 10, 44),
                                             (2013, 8, 10, 10, 42),
                                             (2013, 8, 10, 10, 49),
                                             (2013, 8, 10, 10, 47)]),
                          ('resource-id-2', [(2013, 8, 10, 10, 43),
                                             (2013, 8, 10, 10, 48),
                                             (2013, 8, 10, 10, 42),
                                             (2013, 8, 10, 10, 48),
                                             (2013, 8, 10, 10, 47)]),
                          ('resource-id-3', [(2013, 8, 10, 10, 43),
                                             (2013, 8, 10, 10, 44),
                                             (2013, 8, 10, 10, 50),
                                             (2013, 8, 10, 10, 49),
                                             (2013, 8, 10, 10, 47)])]

        counter = 0
        for resource, timestamps in sample_timings:
            for timestamp in timestamps:
                self.create_and_store_sample(
                    timestamp=datetime.datetime(*timestamp),
                    resource_id=resource,
                    user_id=str(counter % 2),
                    project_id=str(counter % 3),
                    metadata={
                        'display_name': 'test-server',
                        'tag': 'sample-%s' % counter
                    },
                    source='test'
                )
                counter += 1

    def test_get_resources_ordering_all(self):
        resources = list(self.conn.get_resources())
        expected = set([
            ('resource-id-1', 'sample-3'),
            ('resource-id-2', 'sample-8'),
            ('resource-id-3', 'sample-12')
        ])
        received = set([(r.resource_id, r.metadata['tag'])
                        for r in resources])
        self.assertEqual(expected, received)

    def test_get_resources_ordering_single(self):
        resource = list(self.conn.get_resources(resource='resource-id-2'))[0]
        self.assertEqual('resource-id-2', resource.resource_id)
        self.assertEqual('sample-8', resource.metadata['tag'])


class MeterTest(DBTestBase):
    def test_get_meters(self):
        msgs_sources = [msg['source'] for msg in self.msgs]
        results = list(self.conn.get_meters())
        self.assertEqual(9, len(results))
        for meter in results:
            self.assertIn(meter.source, msgs_sources)

    def test_get_meters_by_user(self):
        results = list(self.conn.get_meters(user='user-id'))
        self.assertEqual(1, len(results))

    def test_get_meters_by_project(self):
        results = list(self.conn.get_meters(project='project-id'))
        self.assertEqual(2, len(results))

    def test_get_meters_by_metaquery(self):
        q = {'metadata.display_name': 'test-server'}
        results = list(self.conn.get_meters(metaquery=q))
        self.assertIsNotEmpty(results)
        self.assertEqual(9, len(results))

    def test_get_meters_by_empty_metaquery(self):
        results = list(self.conn.get_meters(metaquery={}))
        self.assertEqual(9, len(results))


class RawSampleTest(DBTestBase):
    def prepare_data(self):
        super(RawSampleTest, self).prepare_data()

        self.msgs.append(self.create_and_store_sample(
            timestamp=datetime.datetime(2012, 7, 2, 10, 39),
            user_id='mongodb_test',
            resource_id='resource-id-mongo_bad_key',
            project_id='project-id-test',
            metadata={'display.name': {'name.$1': 'test-server1',
                                       '$name_2': 'test-server2'},
                      'tag': 'self.counter'},
            source='test-4'
        ))

    def test_get_sample_counter_volume(self):
        # NOTE(idegtiarov) Because wsme expects data of a float type, this
        # test checks the type of counter_volume received from the database.
        f = storage.SampleFilter()
        result = next(self.conn.get_samples(f, limit=1))
        self.assertIsInstance(result.counter_volume, float)

    def test_get_samples_limit_zero(self):
        f = storage.SampleFilter()
        results = list(self.conn.get_samples(f, limit=0))
        self.assertEqual(0, len(results))

    def test_get_samples_limit(self):
        f = storage.SampleFilter()
        results = list(self.conn.get_samples(f, limit=3))
        self.assertEqual(3, len(results))
        for result in results:
            self.assertTimestampEqual(timeutils.utcnow(),
                                      result.recorded_at)

    def test_get_samples_in_default_order(self):
        f = storage.SampleFilter()
        prev_timestamp = None
        for sample_item in self.conn.get_samples(f):
            if prev_timestamp is not None:
                self.assertTrue(prev_timestamp >= sample_item.timestamp)
            prev_timestamp = sample_item.timestamp

    def test_get_samples_by_user(self):
        f = storage.SampleFilter(user='user-id')
        results = list(self.conn.get_samples(f))
        self.assertEqual(3, len(results))
        for meter in results:
            d = meter.as_dict()
            self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
            del d['recorded_at']
            self.assertIn(d, self.msgs[:3])

    def test_get_samples_by_user_limit(self):
        f = storage.SampleFilter(user='user-id')
        results = list(self.conn.get_samples(f, limit=1))
        self.assertEqual(1, len(results))

    def test_get_samples_by_user_limit_bigger(self):
        f = storage.SampleFilter(user='user-id')
        results = list(self.conn.get_samples(f, limit=42))
        self.assertEqual(3, len(results))

    def test_get_samples_by_project(self):
        f = storage.SampleFilter(project='project-id')
        results = list(self.conn.get_samples(f))
        self.assertIsNotNone(results)
        for meter in results:
            d = meter.as_dict()
            self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
            del d['recorded_at']
            self.assertIn(d, self.msgs[:4])

    def test_get_samples_by_resource(self):
        f = storage.SampleFilter(user='user-id', resource='resource-id')
        results = list(self.conn.get_samples(f))
        self.assertEqual(2, len(results))
        d = results[1].as_dict()
        self.assertEqual(timeutils.utcnow(), d['recorded_at'])
        del d['recorded_at']
        self.assertEqual(self.msgs[0], d)

    def test_get_samples_by_metaquery(self):
        q = {'metadata.display_name': 'test-server'}
        f = storage.SampleFilter(metaquery=q)
        results = list(self.conn.get_samples(f))
        self.assertIsNotNone(results)
        for meter in results:
            d = meter.as_dict()
            self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
            del d['recorded_at']
            self.assertIn(d, self.msgs)

    def test_get_samples_by_metaquery_key_with_dot_in_metadata(self):
        q = {'metadata.display.name.name.$1': 'test-server1',
             'metadata.display.name.$name_2': 'test-server2'}
        f = storage.SampleFilter(metaquery=q)
        results = list(self.conn.get_samples(f))
        self.assertIsNotNone(results)
        self.assertEqual(1, len(results))

    def test_get_samples_by_start_time(self):
        timestamp = datetime.datetime(2012, 7, 2, 10, 41)
        f = storage.SampleFilter(
            user='user-id',
            start_timestamp=timestamp,
        )

        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))
        self.assertEqual(timestamp, results[0].timestamp)

        f.start_timestamp_op = 'ge'
        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))
        self.assertEqual(timestamp, results[0].timestamp)

        f.start_timestamp_op = 'gt'
        results = list(self.conn.get_samples(f))
        self.assertEqual(0, len(results))

    def test_get_samples_by_end_time(self):
        timestamp = datetime.datetime(2012, 7, 2, 10, 40)
        f = storage.SampleFilter(
            user='user-id',
            end_timestamp=timestamp,
        )

        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))

        f.end_timestamp_op = 'lt'
        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))

        f.end_timestamp_op = 'le'
        results = list(self.conn.get_samples(f))
        self.assertEqual(2, len(results))
        self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39),
                         results[1].timestamp)

    def test_get_samples_by_both_times(self):
        start_ts = datetime.datetime(2012, 7, 2, 10, 42)
        end_ts = datetime.datetime(2012, 7, 2, 10, 43)
        f = storage.SampleFilter(
            start_timestamp=start_ts,
            end_timestamp=end_ts,
        )

        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))
        self.assertEqual(start_ts, results[0].timestamp)

        f.start_timestamp_op = 'gt'
        f.end_timestamp_op = 'lt'
        results = list(self.conn.get_samples(f))
        self.assertEqual(0, len(results))

        f.start_timestamp_op = 'ge'
        f.end_timestamp_op = 'lt'
        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))
        self.assertEqual(start_ts, results[0].timestamp)

        f.start_timestamp_op = 'gt'
        f.end_timestamp_op = 'le'
        results = list(self.conn.get_samples(f))
        self.assertEqual(1, len(results))
        self.assertEqual(end_ts, results[0].timestamp)

        f.start_timestamp_op = 'ge'
        f.end_timestamp_op = 'le'
        results = list(self.conn.get_samples(f))
        self.assertEqual(2, len(results))
        self.assertEqual(end_ts, results[0].timestamp)
        self.assertEqual(start_ts, results[1].timestamp)

    def test_get_samples_by_name(self):
        f = storage.SampleFilter(user='user-id', meter='no-such-meter')
        results = list(self.conn.get_samples(f))
        self.assertIsEmpty(results)

    def test_get_samples_by_name2(self):
        f = storage.SampleFilter(user='user-id', meter='instance')
        results = list(self.conn.get_samples(f))
        self.assertIsNotEmpty(results)

    def test_get_samples_by_source(self):
        f = storage.SampleFilter(source='test-1')
        results = list(self.conn.get_samples(f))
        self.assertEqual(2, len(results))

    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2')
    def test_clear_metering_data(self):
        # NOTE(jd) Override this test in MongoDB because our code doesn't
        # clear the collections; this is handled by the MongoDB TTL feature.
        self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
        self.conn.clear_expired_metering_data(3 * 60)
        f = storage.SampleFilter(meter='instance')
        results = list(self.conn.get_samples(f))
        self.assertEqual(5, len(results))
        results = list(self.conn.get_resources())
        self.assertEqual(5, len(results))
    @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2')
    def test_clear_metering_data_no_data_to_remove(self):
        # NOTE(jd) Override this test in MongoDB because our code doesn't
        # clear the collections; this is handled by the MongoDB TTL feature.
        self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45)
        self.conn.clear_expired_metering_data(3 * 60)
        f = storage.SampleFilter(meter='instance')
        results = list(self.conn.get_samples(f))
        self.assertEqual(12, len(results))
        results = list(self.conn.get_resources())
        self.assertEqual(10, len(results))

    @tests_db.run_with('sqlite', 'mysql', 'pgsql')
    def test_clear_metering_data_expire_samples_only(self):
        cfg.CONF.set_override('sql_expire_samples_only', True)
        self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
        self.conn.clear_expired_metering_data(4 * 60)
        f = storage.SampleFilter(meter='instance')
        results = list(self.conn.get_samples(f))
        self.assertEqual(7, len(results))
        results = list(self.conn.get_resources())
        self.assertEqual(6, len(results))

    @tests_db.run_with('sqlite', 'mysql', 'pgsql')
    def test_record_metering_data_retry_success_on_deadlock(self):
        raise_deadlock = [False, True]
        self.CONF.set_override('max_retries', 2, group='database')

        s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='',
                          volume=1, user_id='user_id',
                          project_id='project_id',
                          resource_id='resource_id',
                          timestamp=datetime.datetime.utcnow(),
                          resource_metadata={'display_name': 'test-server',
                                             'tag': 'self.counter'},
                          source=None)

        msg = utils.meter_message_from_counter(
            s, self.CONF.publisher.telemetry_secret
        )

        mock_resource_create = mock.patch.object(self.conn,
                                                 "_create_resource")

        mock_resource_create.side_effect = self.create_side_effect(
            self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock)
        with mock.patch.object(api.time, 'sleep') as retry_sleep:
            self.conn.record_metering_data(msg)
            self.assertEqual(1, retry_sleep.call_count)

        f = storage.SampleFilter(meter='instance')
        results = list(self.conn.get_samples(f))
        self.assertEqual(13, len(results))

    @tests_db.run_with('sqlite', 'mysql', 'pgsql')
    def test_record_metering_data_retry_failure_on_deadlock(self):
        raise_deadlock = [True, True, True]
        self.CONF.set_override('max_retries', 3, group='database')

        s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='',
                          volume=1, user_id='user_id',
                          project_id='project_id',
                          resource_id='resource_id',
                          timestamp=datetime.datetime.utcnow(),
                          resource_metadata={'display_name': 'test-server',
                                             'tag': 'self.counter'},
                          source=None)

        msg = utils.meter_message_from_counter(
            s, self.CONF.publisher.telemetry_secret
        )

        mock_resource_create = mock.patch.object(self.conn,
                                                 "_create_resource")

        mock_resource_create.side_effect = self.create_side_effect(
            self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock)
        with mock.patch.object(api.time, 'sleep') as retry_sleep:
            try:
                self.conn.record_metering_data(msg)
            except dbexc.DBError as err:
                self.assertIn('DBDeadlock', str(type(err)))
                self.assertEqual(3, retry_sleep.call_count)


class ComplexSampleQueryTest(DBTestBase):
    def setUp(self):
        super(ComplexSampleQueryTest, self).setUp()
        self.complex_filter = {
            "and":
                [{"or":
                    [{"=": {"resource_id": "resource-id-42"}},
                     {"=": {"resource_id": "resource-id-44"}}]},
                 {"and":
                     [{"=": {"counter_name": "cpu_util"}},
                      {"and":
                          [{">": {"counter_volume": 0.4}},
                           {"not": {">": {"counter_volume": 0.8}}}]}]}]}
        or_expression = [{"=": {"resource_id": "resource-id-42"}},
                         {"=": {"resource_id": "resource-id-43"}},
                         {"=": {"resource_id": "resource-id-44"}}]
        and_expression = [{">": {"counter_volume": 0.4}},
                          {"not": {">": {"counter_volume": 0.8}}}]
        self.complex_filter_list = {
            "and":
                [{"or": or_expression},
                 {"and":
                     [{"=": {"counter_name": "cpu_util"}},
                      {"and": and_expression}]}]}
        in_expression = {"in": {"resource_id": ["resource-id-42",
                                                "resource-id-43",
                                                "resource-id-44"]}}
        self.complex_filter_in = {
            "and":
                [in_expression,
                 {"and":
                     [{"=": {"counter_name": "cpu_util"}},
                      {"and": and_expression}]}]}
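    # A reading aid (not part of the original tests): complex_filter above
    # is the dict form of the boolean expression
    #   (resource_id == "resource-id-42" OR resource_id == "resource-id-44")
    #   AND counter_name == "cpu_util"
    #   AND counter_volume > 0.4 AND NOT (counter_volume > 0.8)
    # complex_filter_list and complex_filter_in express the same name and
    # volume constraints over three resource ids, via "or" and "in"
    # respectively.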
"resource-id-44"]}} self.complex_filter_in = {"and": [in_expression, {"and": [{"=": {"counter_name": "cpu_util"}}, {"and": and_expression}]}]} def _create_samples(self): for resource in range(42, 45): for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: metadata = {'a_string_key': "meta-value" + str(volume), 'a_float_key': volume, 'an_int_key': resource, 'a_bool_key': (resource == 43)} self.create_and_store_sample(resource_id="resource-id-%s" % resource, metadata=metadata, name="cpu_util", volume=volume) def test_no_filter(self): results = list(self.conn.query_samples()) self.assertEqual(len(self.msgs), len(results)) for sample_item in results: d = sample_item.as_dict() del d['recorded_at'] self.assertIn(d, self.msgs) def test_query_complex_filter_with_regexp(self): self._create_samples() complex_regex_filter = {"and": [ {"=~": {"resource_id": "resource-id.*"}}, {"=": {"counter_volume": 0.4}}]} results = list( self.conn.query_samples(filter_expr=complex_regex_filter)) self.assertEqual(3, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-43", "resource-id-44"])) def test_query_complex_filter_with_regexp_metadata(self): self._create_samples() complex_regex_filter = {"and": [ {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, {"=": {"counter_volume": 0.4}}]} results = list( self.conn.query_samples(filter_expr=complex_regex_filter)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("meta-value0.4", sample_item.resource_metadata['a_string_key']) def test_no_filter_with_zero_limit(self): limit = 0 results = list(self.conn.query_samples(limit=limit)) self.assertEqual(limit, len(results)) def test_no_filter_with_limit(self): limit = 3 results = list(self.conn.query_samples(limit=limit)) self.assertEqual(limit, len(results)) def test_query_simple_filter(self): simple_filter = {"=": {"resource_id": "resource-id-8"}} results = list(self.conn.query_samples(filter_expr=simple_filter)) self.assertEqual(1, len(results)) for sample_item in results: self.assertEqual("resource-id-8", sample_item.resource_id) def test_query_simple_filter_with_not_equal_relation(self): simple_filter = {"!=": {"resource_id": "resource-id-8"}} results = list(self.conn.query_samples(filter_expr=simple_filter)) self.assertEqual(len(self.msgs) - 1, len(results)) for sample_item in results: self.assertNotEqual("resource-id-8", sample_item.resource_id) def test_query_complex_filter(self): self._create_samples() results = list(self.conn.query_samples(filter_expr=( self.complex_filter))) self.assertEqual(6, len(results)) for sample_item in results: self.assertIn(sample_item.resource_id, set(["resource-id-42", "resource-id-44"])) self.assertEqual("cpu_util", sample_item.counter_name) self.assertTrue(sample_item.counter_volume > 0.4) self.assertTrue(sample_item.counter_volume <= 0.8) def test_query_complex_filter_with_limit(self): self._create_samples() limit = 3 results = list(self.conn.query_samples(filter_expr=self.complex_filter, limit=limit)) self.assertEqual(limit, len(results)) def test_query_complex_filter_with_simple_orderby(self): self._create_samples() expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] orderby = [{"counter_volume": "asc"}] results = list(self.conn.query_samples(filter_expr=self.complex_filter, orderby=orderby)) self.assertEqual(expected_volume_order, [s.counter_volume for s in results]) def test_query_complex_filter_with_complex_orderby(self): self._create_samples() expected_volume_order = [0.41, 
0.41, 0.79, 0.79, 0.8, 0.8]
        expected_resource_id_order = ["resource-id-44", "resource-id-42",
                                      "resource-id-44", "resource-id-42",
                                      "resource-id-44", "resource-id-42"]
        orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}]
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter,
                                    orderby=orderby))
        self.assertEqual(expected_volume_order,
                         [s.counter_volume for s in results])
        self.assertEqual(expected_resource_id_order,
                         [s.resource_id for s in results])

    def test_query_complex_filter_with_list(self):
        self._create_samples()
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter_list))
        self.assertEqual(9, len(results))
        for sample_item in results:
            self.assertIn(sample_item.resource_id,
                          set(["resource-id-42", "resource-id-43",
                               "resource-id-44"]))
            self.assertEqual("cpu_util", sample_item.counter_name)
            self.assertTrue(sample_item.counter_volume > 0.4)
            self.assertTrue(sample_item.counter_volume <= 0.8)

    def test_query_complex_filter_with_list_with_limit(self):
        self._create_samples()
        limit = 3
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter_list,
                                    limit=limit))
        self.assertEqual(limit, len(results))

    def test_query_complex_filter_with_list_with_simple_orderby(self):
        self._create_samples()
        expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, 0.79,
                                 0.8, 0.8, 0.8]
        orderby = [{"counter_volume": "asc"}]
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter_list,
                                    orderby=orderby))
        self.assertEqual(expected_volume_order,
                         [s.counter_volume for s in results])

    def test_query_complex_filter_with_list_with_complex_orderby(self):
        self._create_samples()
        expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, 0.79,
                                 0.8, 0.8, 0.8]
        expected_resource_id_order = ["resource-id-44", "resource-id-43",
                                      "resource-id-42", "resource-id-44",
                                      "resource-id-43", "resource-id-42",
                                      "resource-id-44", "resource-id-43",
                                      "resource-id-42"]
        orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}]
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter_list,
                                    orderby=orderby))
        self.assertEqual(expected_volume_order,
                         [s.counter_volume for s in results])
        self.assertEqual(expected_resource_id_order,
                         [s.resource_id for s in results])

    def test_query_complex_filter_with_wrong_order_in_orderby(self):
        self._create_samples()
        orderby = [{"counter_volume": "not valid order"},
                   {"resource_id": "desc"}]
        query = lambda: list(self.conn.query_samples(
            filter_expr=self.complex_filter, orderby=orderby))
        self.assertRaises(KeyError, query)

    def test_query_complex_filter_with_in(self):
        self._create_samples()
        results = list(
            self.conn.query_samples(filter_expr=self.complex_filter_in))
        self.assertEqual(9, len(results))
        for sample_item in results:
            self.assertIn(sample_item.resource_id,
                          set(["resource-id-42", "resource-id-43",
                               "resource-id-44"]))
            self.assertEqual("cpu_util", sample_item.counter_name)
            self.assertTrue(sample_item.counter_volume > 0.4)
            self.assertTrue(sample_item.counter_volume <= 0.8)

    def test_query_simple_metadata_filter(self):
        self._create_samples()
        filter_expr = {"=": {"resource_metadata.a_bool_key": True}}
        results = list(self.conn.query_samples(filter_expr=filter_expr))
        self.assertEqual(6, len(results))
        for sample_item in results:
            self.assertTrue(sample_item.resource_metadata["a_bool_key"])

    def test_query_simple_metadata_with_in_op(self):
        self._create_samples()
        filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}}
        results = list(self.conn.query_samples(filter_expr=filter_expr))
        self.assertEqual(12, len(results))
        for sample_item in results:
self.assertIn(sample_item.resource_metadata["an_int_key"], [42, 43]) def test_query_complex_metadata_filter(self): self._create_samples() subfilter = {"or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]} filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, subfilter]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(8, len(results)) for sample_item in results: self.assertTrue((sample_item.resource_metadata["a_string_key"] == "meta-value0.81" or sample_item.resource_metadata["a_float_key"] <= 0.41)) self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) def test_query_mixed_data_and_metadata_filter(self): self._create_samples() subfilter = {"or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]} filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}}, subfilter]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertTrue((sample_item.resource_metadata["a_string_key"] == "meta-value0.81" or sample_item.resource_metadata["a_float_key"] <= 0.41)) self.assertEqual("resource-id-42", sample_item.resource_id) def test_query_non_existing_metadata_with_result(self): self._create_samples() filter_expr = { "or": [{"=": {"resource_metadata.a_string_key": "meta-value0.81"}}, {"<=": {"resource_metadata.key_not_exists": 0.41}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("meta-value0.81", sample_item.resource_metadata["a_string_key"]) def test_query_non_existing_metadata_without_result(self): self._create_samples() filter_expr = { "or": [{"=": {"resource_metadata.key_not_exists": "meta-value0.81"}}, {"<=": {"resource_metadata.key_not_exists": 0.41}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(0, len(results)) def test_query_negated_metadata(self): self._create_samples() filter_expr = { "and": [{"=": {"resource_id": "resource-id-42"}}, {"not": {"or": [{">": {"resource_metadata.an_int_key": 43}}, {"<=": {"resource_metadata.a_float_key": 0.41}}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertEqual("resource-id-42", sample_item.resource_id) self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) self.assertTrue(sample_item.resource_metadata["a_float_key"] > 0.41) def test_query_negated_complex_expression(self): self._create_samples() filter_expr = { "and": [{"=": {"counter_name": "cpu_util"}}, {"not": {"or": [{"or": [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-44"}}]}, {"and": [{">": {"counter_volume": 0.4}}, {"<": {"counter_volume": 0.8}}]}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) self.assertEqual("cpu_util", sample_item.counter_name) def test_query_with_double_negation(self): self._create_samples() filter_expr = { "and": [{"=": {"counter_name": "cpu_util"}}, {"not": {"or": [{"or": [{"=": {"resource_id": "resource-id-42"}}, {"=": {"resource_id": "resource-id-44"}}]}, {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, {"<": {"counter_volume": 
0.8}}]}]}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(4, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) self.assertEqual("cpu_util", sample_item.counter_name) def test_query_negate_not_equal(self): self._create_samples() filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(6, len(results)) for sample_item in results: self.assertEqual("resource-id-43", sample_item.resource_id) def test_query_negated_in_op(self): self._create_samples() filter_expr = { "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, {"=": {"resource_id": "resource-id-42"}}]} results = list(self.conn.query_samples(filter_expr=filter_expr)) self.assertEqual(3, len(results)) for sample_item in results: self.assertIn(sample_item.counter_volume, [0.41, 0.8, 0.81]) class StatisticsTest(DBTestBase): def prepare_data(self): for i in range(3): c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) for i in range(3): c = sample.Sample( 'volume.size', 'gauge', 'GiB', 8 + i, 'user-5', 'project2', 'resource-6', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) for i in range(3): c = sample.Sample( 'memory', 'gauge', 'MB', 8 + i, 'user-5', 'project2', 'resource-6', timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), resource_metadata={}, source='test', ) msg = utils.meter_message_from_counter( c, secret='not-so-secret', ) self.conn.record_metering_data(msg) def test_by_meter(self): f = storage.SampleFilter( meter='memory' ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('MB', results.unit) self.assertEqual(8, results.min) self.assertEqual(10, results.max) self.assertEqual(27, results.sum) self.assertEqual(9, results.avg) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), results.period_start) self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), results.period_end) def test_by_user(self): f = storage.SampleFilter( user='user-5', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('GiB', results.unit) self.assertEqual(8, results.min) self.assertEqual(10, results.max) self.assertEqual(27, results.sum) self.assertEqual(9, results.avg) def test_no_period_in_query(self): f = storage.SampleFilter( user='user-5', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual(0, results.period) def test_period_is_int(self): f = storage.SampleFilter( meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertIs(int, type(results.period)) 
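        # A rough sketch (not the drivers' literal code) of the bucketing
        # the period tests below depend on:
        #     n = int((sample_ts - start_ts).total_seconds() // period)
        #     period_start = start_ts + datetime.timedelta(seconds=n * period)
        #     period_end = period_start + datetime.timedelta(seconds=period)
        # e.g. a start_timestamp of 10:28 with period=7200 yields buckets
        # beginning at 10:28 and 12:28, as asserted in test_by_user_period.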
self.assertEqual(6, results.count) def test_by_user_period(self): f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp='2012-09-25T10:28:00', ) results = list(self.conn.get_meter_statistics(f, period=7200)) self.assertEqual(2, len(results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), datetime.datetime(2012, 9, 25, 12, 28)]), set(r.period_start for r in results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), datetime.datetime(2012, 9, 25, 14, 28)]), set(r.period_end for r in results)) r = results[0] self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), r.period_start) self.assertEqual(2, r.count) self.assertEqual('GiB', r.unit) self.assertEqual(8.5, r.avg) self.assertEqual(8, r.min) self.assertEqual(9, r.max) self.assertEqual(17, r.sum) self.assertEqual(7200, r.period) self.assertIsInstance(r.period, int) expected_end = r.period_start + datetime.timedelta(seconds=7200) self.assertEqual(expected_end, r.period_end) self.assertEqual(3660, r.duration) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_start) self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), r.duration_end) def test_by_user_period_with_timezone(self): dates = [ '2012-09-25T00:28:00-10:00', '2012-09-25T01:28:00-09:00', '2012-09-25T02:28:00-08:00', '2012-09-25T03:28:00-07:00', '2012-09-25T04:28:00-06:00', '2012-09-25T05:28:00-05:00', '2012-09-25T06:28:00-04:00', '2012-09-25T07:28:00-03:00', '2012-09-25T08:28:00-02:00', '2012-09-25T09:28:00-01:00', '2012-09-25T10:28:00Z', '2012-09-25T11:28:00+01:00', '2012-09-25T12:28:00+02:00', '2012-09-25T13:28:00+03:00', '2012-09-25T14:28:00+04:00', '2012-09-25T15:28:00+05:00', '2012-09-25T16:28:00+06:00', '2012-09-25T17:28:00+07:00', '2012-09-25T18:28:00+08:00', '2012-09-25T19:28:00+09:00', '2012-09-25T20:28:00+10:00', '2012-09-25T21:28:00+11:00', '2012-09-25T22:28:00+12:00', ] for date in dates: f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp=date ) results = list(self.conn.get_meter_statistics(f, period=7200)) self.assertEqual(2, len(results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), datetime.datetime(2012, 9, 25, 12, 28)]), set(r.period_start for r in results)) self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), datetime.datetime(2012, 9, 25, 14, 28)]), set(r.period_end for r in results)) def test_by_user_period_start_end(self): f = storage.SampleFilter( user='user-5', meter='volume.size', start_timestamp='2012-09-25T10:28:00', end_timestamp='2012-09-25T11:28:00', ) results = list(self.conn.get_meter_statistics(f, period=1800)) self.assertEqual(1, len(results)) r = results[0] self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), r.period_start) self.assertEqual(1, r.count) self.assertEqual('GiB', r.unit) self.assertEqual(8, r.avg) self.assertEqual(8, r.min) self.assertEqual(8, r.max) self.assertEqual(8, r.sum) self.assertEqual(1800, r.period) self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), r.period_end) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_start) self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), r.duration_end) def test_by_project(self): f = storage.SampleFilter( meter='volume.size', resource='resource-id', start_timestamp='2012-09-25T11:30:00', end_timestamp='2012-09-25T11:32:00', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual(0, results.duration) self.assertEqual(1, results.count) self.assertEqual('GiB', results.unit) 
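        # The 11:30-11:32 window isolates the single sample recorded at
        # 11:31 (volume 6), so min, max, sum and avg all collapse to 6.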
self.assertEqual(6, results.min) self.assertEqual(6, results.max) self.assertEqual(6, results.sum) self.assertEqual(6, results.avg) def test_one_resource(self): f = storage.SampleFilter( user='user-id', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f))[0] self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - datetime.datetime(2012, 9, 25, 10, 30)).seconds, results.duration) self.assertEqual(3, results.count) self.assertEqual('GiB', results.unit) self.assertEqual(5, results.min) self.assertEqual(7, results.max) self.assertEqual(18, results.sum) self.assertEqual(6, results.avg) def test_with_no_sample(self): f = storage.SampleFilter( user='user-not-exists', meter='volume.size', ) results = list(self.conn.get_meter_statistics(f, period=1800)) self.assertEqual([], results) class StatisticsGroupByTest(DBTestBase): def prepare_data(self): test_sample_data = ( {'volume': 2, 'user': 'user-1', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', 'source': 'source-2', 'metadata_instance_type': '84'}, {'volume': 2, 'user': 'user-1', 'project': 'project-2', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', 'source': 'source-2', 'metadata_instance_type': '83'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 1, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 2, 'user': 'user-2', 'project': 'project-1', 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '84'}, {'volume': 4, 'user': 'user-2', 'project': 'project-2', 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', 'source': 'source-1', 'metadata_instance_type': '82'}, {'volume': 4, 'user': 'user-3', 'project': 'project-1', 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', 'source': 'source-3', 'metadata_instance_type': '83'}, ) for test_sample in test_sample_data: c = sample.Sample( 'instance', sample.TYPE_CUMULATIVE, unit='s', volume=test_sample['volume'], user_id=test_sample['user'], project_id=test_sample['project'], resource_id=test_sample['resource'], timestamp=datetime.datetime(*test_sample['timestamp']), resource_metadata={'flavor': test_sample['metadata_flavor'], 'event': test_sample['metadata_event'], 'instance_type': test_sample['metadata_instance_type']}, source=test_sample['source'], ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_group_by_user(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id']), groupby_keys_set) 
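        # Each Statistics result carries a one-entry 'groupby' dict of the
        # form {field: value}; the set assertions on either side flatten
        # those dicts to check the bucket keys and values in one pass.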
self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2'}: self.assertEqual(4, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(8, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_resource(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-2'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_project(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(5, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(10, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_source(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['source'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['source']), groupby_keys_set) self.assertEqual(set(['source-1', 'source-2', 'source-3']), groupby_vals_set) for r in results: if r.groupby == {'source': 'source-1'}: self.assertEqual(4, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) 
self.assertEqual(4, r.max) self.assertEqual(8, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'source': 'source-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'source': 'source-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_unknown_field(self): f = storage.SampleFilter( meter='instance', ) # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list # whereas the SQLAlchemy get_meter_statistics() returns a generator. # You have to apply list() to the SQLAlchemy generator to get it to # throw an error. The MongoDB get_meter_statistics() will throw an # error before list() is called. By using lambda, we can cover both # MongoDB and SQLAlchemy in a single test. self.assertRaises( ceilometer.NotImplementedError, lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) ) def test_group_by_metadata(self): # This test checks grouping by a single metadata field # (now only resource_metadata.instance_type is available). f = storage.SampleFilter( meter='instance', ) results = list( self.conn.get_meter_statistics( f, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'resource_metadata.instance_type': '82'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_metadata.instance_type': '83'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) elif r.groupby == {'resource_metadata.instance_type': '84'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) def test_group_by_multiple_regular(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, groupby=['user_id', 'resource_id'])) self.assertEqual(4, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_id': 'resource-1'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby 
== {'user_id': 'user-2', 'resource_id': 'resource-2'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3', 'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'user_id': 'user-1', 'resource_id': 'resource-2'}, r.groupby) self.assertNotEqual({'user_id': 'user-1', 'resource_id': 'resource-3'}, r.groupby) self.assertNotEqual({'user_id': 'user-2', 'resource_id': 'resource-3'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_id': 'resource-1'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_id': 'resource-2'}, r.groupby, ) def test_group_by_multiple_metadata(self): # TODO(terriyu): test_group_by_multiple_metadata needs to be # implemented. # This test should check grouping by multiple metadata fields. pass def test_group_by_multiple_regular_metadata(self): # This test checks grouping by a combination of regular and # metadata fields. f = storage.SampleFilter( meter='instance', ) results = list( self.conn.get_meter_statistics( f, groupby=['user_id', 'resource_metadata.instance_type'])) self.assertEqual(5, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'user_id': 'user-1', 'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-1', 'resource_metadata.instance_type': '84'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_metadata.instance_type': '82'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-2', 'resource_metadata.instance_type': '84'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'user_id': 'user-3', 'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'user_id': 'user-1', 'resource_metadata.instance_type': '82'}, r.groupby) self.assertNotEqual({'user_id': 'user-2', 'resource_metadata.instance_type': '83'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_metadata.instance_type': '82'}, r.groupby) self.assertNotEqual({'user_id': 'user-3', 'resource_metadata.instance_type': '84'}, r.groupby) def test_group_by_with_query_filter(self): f = storage.SampleFilter( meter='instance', project='project-1', ) results = 
list(self.conn.get_meter_statistics( f, groupby=['resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_metadata_with_query_filter(self): # This test checks grouping by a metadata field in combination # with a query filter. f = storage.SampleFilter( meter='instance', project='project-1', ) results = list(self.conn.get_meter_statistics( f, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) for r in results: if r.groupby == {'resource_metadata.instance_type': '82'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'resource_metadata.instance_type': '83'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) elif r.groupby == {'resource_metadata.instance_type': '84'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) def test_group_by_with_query_filter_multiple(self): f = storage.SampleFilter( meter='instance', user='user-2', source='source-1', ) results = list(self.conn.get_meter_statistics( f, groupby=['project_id', 'resource_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2', 'resource-1', 'resource-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1', 'resource_id': 'resource-1'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-1', 'resource_id': 'resource-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) 
self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) elif r.groupby == {'project_id': 'project-2', 'resource_id': 'resource-2'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) else: self.assertNotEqual({'project_id': 'project-2', 'resource_id': 'resource-1'}, r.groupby) def test_group_by_metadata_with_query_filter_multiple(self): # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple # needs to be implemented. # This test should check grouping by multiple metadata fields in # combination with a query filter. pass def test_group_by_with_period(self): f = storage.SampleFilter( meter='instance', ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(4, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) 
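                # A one-sample bucket: duration is zero and duration_start
                # equals duration_end.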
self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def test_group_by_metadata_with_period(self): # This test checks grouping by metadata fields in combination # with period grouping. f = storage.SampleFilter( meter='instance') results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['resource_metadata.instance_type'])) self.assertEqual(5, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) 
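                # duration_start/duration_end track the actual first and
                # last samples that fell into the bucket, while
                # period/period_end (asserted next) describe the requested
                # 7200-second window itself.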
self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '84'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) else: self.assertNotEqual([{'resource_metadata.instance_type': '82'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '83'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) def test_group_by_with_query_filter_and_period(self): f = storage.SampleFilter( meter='instance', source='source-1', ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11), datetime.datetime(2013, 8, 1, 16, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), 
r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def test_group_by_metadata_with_query_filter_and_period(self): # This test checks grouping with metadata fields in combination # with a query filter and period grouping. f = storage.SampleFilter( meter='instance', project='project-1', ) results = list( self.conn.get_meter_statistics( f, period=7200, groupby=['resource_metadata.instance_type'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_metadata.instance_type']), groupby_keys_set) self.assertEqual(set(['82', '83', '84']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), datetime.datetime(2013, 8, 1, 14, 11)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'resource_metadata.instance_type': '82'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '83'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), r.period_end) elif (r.groupby == {'resource_metadata.instance_type': '84'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) self.assertEqual(4260, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), r.period_end) else: self.assertNotEqual([{'resource_metadata.instance_type': '82'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '83'}, datetime.datetime(2013, 8, 1, 14, 11)], [r.groupby, r.period_start]) self.assertNotEqual([{'resource_metadata.instance_type': '84'}, datetime.datetime(2013, 8, 1, 10, 11)], [r.groupby, r.period_start]) def 
test_group_by_start_timestamp_after(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual([], results) def test_group_by_end_timestamp_before(self): f = storage.SampleFilter( meter='instance', end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual([], results) def test_group_by_start_timestamp(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_end_timestamp(self): f = storage.SampleFilter( meter='instance', end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(1, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(3, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(2, r.avg) def test_group_by_start_end_timestamp(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), ) results = list(self.conn.get_meter_statistics(f, groupby=['project_id'])) self.assertEqual(2, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) for r in results: if r.groupby == {'project_id': 'project-1'}: self.assertEqual(5, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(4, r.max) self.assertEqual(10, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'project_id': 'project-2'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(4, r.max) self.assertEqual(6, r.sum) self.assertEqual(3, r.avg) def test_group_by_start_end_timestamp_with_query_filter(self): f = storage.SampleFilter( meter='instance', project='project-1', 
start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), ) results = list(self.conn.get_meter_statistics(f, groupby=['resource_id'])) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['resource_id']), groupby_keys_set) self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) for r in results: if r.groupby == {'resource_id': 'resource-1'}: self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(4, r.sum) self.assertEqual(2, r.avg) elif r.groupby == {'resource_id': 'resource-3'}: self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) def test_group_by_start_end_timestamp_with_period(self): f = storage.SampleFilter( meter='instance', start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), ) results = list(self.conn.get_meter_statistics(f, period=3600, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), datetime.datetime(2013, 8, 1, 15, 0), datetime.datetime(2013, 8, 1, 16, 0)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 15, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), r.duration_end) self.assertEqual(3600, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), 
r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 15, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 14, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 16, 0)], [r.groupby, r.period_start]) def test_group_by_start_end_timestamp_with_query_filter_and_period(self): f = storage.SampleFilter( meter='instance', source='source-1', start_timestamp=datetime.datetime(2013, 8, 1, 10, 0), end_timestamp=datetime.datetime(2013, 8, 1, 18, 0), ) results = list(self.conn.get_meter_statistics(f, period=7200, groupby=['project_id'])) self.assertEqual(3, len(results)) groupby_list = [r.groupby for r in results] groupby_keys_set = set(x for sub_dict in groupby_list for x in sub_dict.keys()) groupby_vals_set = set(x for sub_dict in groupby_list for x in sub_dict.values()) self.assertEqual(set(['project_id']), groupby_keys_set) self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) period_start_set = set([r.period_start for r in results]) period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0), datetime.datetime(2013, 8, 1, 14, 0), datetime.datetime(2013, 8, 1, 16, 0)]) self.assertEqual(period_start_valid, period_start_set) for r in results: if (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 10, 0)): self.assertEqual(2, r.count) self.assertEqual('s', r.unit) self.assertEqual(1, r.min) self.assertEqual(1, r.max) self.assertEqual(2, r.sum) self.assertEqual(1, r.avg) self.assertEqual(1740, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0), r.period_end) elif (r.groupby == {'project_id': 'project-1'} and r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(2, r.min) self.assertEqual(2, r.max) self.assertEqual(2, r.sum) self.assertEqual(2, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0), r.period_end) elif (r.groupby == {'project_id': 'project-2'} and r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): self.assertEqual(1, r.count) self.assertEqual('s', r.unit) self.assertEqual(4, r.min) self.assertEqual(4, r.max) self.assertEqual(4, r.sum) self.assertEqual(4, r.avg) self.assertEqual(0, r.duration) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_start) self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), r.duration_end) self.assertEqual(7200, r.period) self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0), r.period_end) else: self.assertNotEqual([{'project_id': 'project-1'}, datetime.datetime(2013, 8, 1, 16, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 10, 0)], [r.groupby, r.period_start]) self.assertNotEqual([{'project_id': 'project-2'}, datetime.datetime(2013, 8, 1, 14, 0)], [r.groupby, r.period_start]) class CounterDataTypeTest(DBTestBase): def prepare_data(self): c = sample.Sample( 'dummyBigCounter', sample.TYPE_CUMULATIVE, unit='', volume=337203685477580, user_id='user-id', 
project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) c = sample.Sample( 'dummySmallCounter', sample.TYPE_CUMULATIVE, unit='', volume=-337203685477580, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) c = sample.Sample( 'floatCounter', sample.TYPE_CUMULATIVE, unit='', volume=1938495037.53697, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={}, source='test-1', ) msg = utils.meter_message_from_counter( c, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_storage_can_handle_large_values(self): f = storage.SampleFilter( meter='dummyBigCounter', ) results = list(self.conn.get_samples(f)) self.assertEqual(337203685477580, results[0].counter_volume) f = storage.SampleFilter( meter='dummySmallCounter', ) results = list(self.conn.get_samples(f)) observed_num = int(results[0].counter_volume) self.assertEqual(-337203685477580, observed_num) def test_storage_can_handle_float_values(self): f = storage.SampleFilter( meter='floatCounter', ) results = list(self.conn.get_samples(f)) self.assertEqual(1938495037.53697, results[0].counter_volume) class EventTestBase(tests_db.TestBase): """Separate test base class. We don't want to inherit all the Meter stuff. """ def setUp(self): super(EventTestBase, self).setUp() self.prepare_data() def prepare_data(self): self.event_models = [] base = 0 self.start = datetime.datetime(2013, 12, 31, 5, 0) now = self.start for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']: trait_models = [event_models.Trait(name, dtype, value) for name, dtype, value in [ ('trait_A', event_models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', event_models.Trait.INT_TYPE, base + 1), ('trait_C', event_models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', event_models.Trait.DATETIME_TYPE, now)]] self.event_models.append( event_models.Event("id_%s_%d" % (event_type, base), event_type, now, trait_models, {'status': {'nested': 'started'}})) base += 100 now = now + datetime.timedelta(hours=1) self.end = now self.event_conn.record_events(self.event_models) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTTLTest(EventTestBase): @mock.patch.object(timeutils, 'utcnow') def test_clear_expired_event_data(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0) self.event_conn.clear_expired_event_data(3600) events = list(self.event_conn.get_events(storage.EventFilter())) self.assertEqual(2, len(events)) event_types = list(self.event_conn.get_event_types()) self.assertEqual(['Bar', 'Zoo'], event_types) for event_type in event_types: trait_types = list(self.event_conn.get_trait_types(event_type)) self.assertEqual(4, len(trait_types)) traits = list(self.event_conn.get_traits(event_type)) self.assertEqual(4, len(traits)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'db2') class EventTest(EventTestBase): def test_duplicate_message_id(self): now = datetime.datetime.utcnow() m = [event_models.Event("1", "Foo", now, None, {}), event_models.Event("1", "Zoo", now, [], {})] with 
mock.patch('%s.LOG' % self.event_conn.record_events.__module__) as log: self.event_conn.record_events(m) self.assertEqual(1, log.info.call_count) def test_bad_event(self): now = datetime.datetime.utcnow() broken_event = event_models.Event("1", "Foo", now, None, {}) del broken_event.__dict__['raw'] m = [broken_event, broken_event] with mock.patch('%s.LOG' % self.event_conn.record_events.__module__) as log: self.assertRaises(AttributeError, self.event_conn.record_events, m) # ensure that record_events does not break on the first error but # delays the exception and tries to record each event. self.assertEqual(2, log.exception.call_count) class GetEventTest(EventTestBase): def test_generated_is_datetime(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) for i, event in enumerate(events): self.assertIsInstance(event.generated, datetime.datetime) self.assertEqual(event.generated, self.event_models[i].generated) model_traits = self.event_models[i].traits for j, trait in enumerate(event.traits): if trait.dtype == event_models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) self.assertEqual(trait.value, model_traits[j].value) def test_simple_get(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) start_time = None for i, type in enumerate(['Foo', 'Bar', 'Zoo']): self.assertEqual(type, events[i].event_type) self.assertEqual(4, len(events[i].traits)) # Ensure sorted results ... if start_time is not None: # Python 2.6 has no assertLess :( self.assertTrue(start_time < events[i].generated) start_time = events[i].generated def test_simple_get_event_type(self): expected_trait_values = { 'id_Bar_100': { 'trait_A': 'my_Bar_text', 'trait_B': 101, 'trait_C': 100.123456, 'trait_D': self.start + datetime.timedelta(hours=1) }, 'id_Bar_400': { 'trait_A': 'my_Bar_text', 'trait_B': 401, 'trait_C': 400.123456, 'trait_D': self.start + datetime.timedelta(hours=4) } } event_filter = storage.EventFilter(self.start, self.end, "Bar") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual("Bar", events[1].event_type) self.assertEqual(4, len(events[0].traits)) self.assertEqual(4, len(events[1].traits)) for event in events: trait_values = expected_trait_values.get(event.message_id, None) if not trait_values: self.fail("Unexpected event ID returned: %s" % event.message_id) for trait in event.traits: expected_val = trait_values.get(trait.name) if not expected_val: self.fail("Unexpected trait name: %s" % trait.name) self.assertEqual(expected_val, trait.value) def test_get_event_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 101}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def test_get_event_trait_filter_op_string(self): trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text', 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Foo",
events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_A', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[3].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_integer(self): trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_B', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[4].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_float(self): trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, 
traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_C', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) def test_get_event_trait_filter_op_datetime(self): trait_filters = [{'key': 'trait_D', 'datetime': self.start + datetime.timedelta(hours=2), 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Zoo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_D', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(2, len(events)) trait_filters[0].update({'key': 'trait_D', 'op': 'le'}) self.assertEqual("Bar", events[1].event_type) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", 
events[2].event_type) def test_get_event_multiple_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_A', 'string': 'my_Foo_text'}, {'key': 'trait_C', 'float': 0.123456}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def test_get_event_multiple_trait_filter_expect_none(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_A', 'string': 'my_Zoo_text'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(0, len(events)) def test_get_event_types(self): event_types = [e for e in self.event_conn.get_event_types()] self.assertEqual(3, len(event_types)) self.assertIn("Bar", event_types) self.assertIn("Foo", event_types) self.assertIn("Zoo", event_types) def test_get_trait_types(self): trait_types = [tt for tt in self.event_conn.get_trait_types("Foo")] self.assertEqual(4, len(trait_types)) trait_type_names = map(lambda x: x['name'], trait_types) self.assertIn("trait_A", trait_type_names) self.assertIn("trait_B", trait_type_names) self.assertIn("trait_C", trait_type_names) self.assertIn("trait_D", trait_type_names) def test_get_trait_types_unknown_event(self): trait_types = [tt for tt in self.event_conn.get_trait_types("Moo")] self.assertEqual(0, len(trait_types)) def test_get_traits(self): traits = self.event_conn.get_traits("Bar") # format results in a way that makes them easier to work with trait_dict = {} for trait in traits: trait_dict[trait.name] = trait.dtype self.assertIn("trait_A", trait_dict) self.assertEqual(event_models.Trait.TEXT_TYPE, trait_dict["trait_A"]) self.assertIn("trait_B", trait_dict) self.assertEqual(event_models.Trait.INT_TYPE, trait_dict["trait_B"]) self.assertIn("trait_C", trait_dict) self.assertEqual(event_models.Trait.FLOAT_TYPE, trait_dict["trait_C"]) self.assertIn("trait_D", trait_dict) self.assertEqual(event_models.Trait.DATETIME_TYPE, trait_dict["trait_D"]) def test_get_all_traits(self): traits = self.event_conn.get_traits("Foo") traits = sorted([t for t in traits], key=operator.attrgetter('dtype')) self.assertEqual(8, len(traits)) trait = traits[0] self.assertEqual("trait_A", trait.name) self.assertEqual(event_models.Trait.TEXT_TYPE, trait.dtype) def test_simple_get_event_no_traits(self): new_events = [event_models.Event("id_notraits", "NoTraits", self.start, [], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(self.start, self.end, "NoTraits") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("id_notraits", events[0].message_id) self.assertEqual("NoTraits", events[0].event_type) self.assertEqual(0, len(events[0].traits)) def test_simple_get_no_filters(self): event_filter = storage.EventFilter(None, None, None) events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(6, len(events)) def test_get_by_message_id(self): new_events = [event_models.Event("id_testid", "MessageIDTest", self.start, [], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(message_id="id_testid") events = [event for event in self.event_conn.get_events(event_filter)] self.assertEqual(1, len(events)) event = events[0] 
self.assertEqual("id_testid", event.message_id) def test_simple_get_raw(self): event_filter = storage.EventFilter() events = [event for event in self.event_conn.get_events(event_filter)] self.assertTrue(events) self.assertEqual({'status': {'nested': 'started'}}, events[0].raw) def test_trait_type_enforced_on_none(self): new_events = [event_models.Event( "id_testid", "MessageIDTest", self.start, [event_models.Trait('text', event_models.Trait.TEXT_TYPE, ''), event_models.Trait('int', event_models.Trait.INT_TYPE, 0), event_models.Trait('float', event_models.Trait.FLOAT_TYPE, 0.0)], {})] self.event_conn.record_events(new_events) event_filter = storage.EventFilter(message_id="id_testid") events = [event for event in self.event_conn.get_events(event_filter)] options = [(event_models.Trait.TEXT_TYPE, ''), (event_models.Trait.INT_TYPE, 0.0), (event_models.Trait.FLOAT_TYPE, 0.0)] for trait in events[0].traits: options.remove((trait.dtype, trait.value)) class BigIntegerTest(tests_db.TestBase): def test_metadata_bigint(self): metadata = {'bigint': 99999999999999} s = sample.Sample(name='name', type=sample.TYPE_GAUGE, unit='B', volume=1, user_id='user-id', project_id='project-id', resource_id='resource-id', timestamp=datetime.datetime.utcnow(), resource_metadata=metadata) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret) self.conn.record_metering_data(msg) @tests_db.run_with('mongodb') class MongoAutoReconnectTest(DBTestBase): def setUp(self): super(MongoAutoReconnectTest, self).setUp() self.CONF.set_override('retry_interval', 0, group='database') def test_mongo_client(self): self.assertIsInstance(self.conn.conn.conn, pymongo.MongoClient) def test_mongo_cursor_next(self): expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) raise_exc = [False, True] method = self.conn.db.resource.find().cursor.next with mock.patch('pymongo.cursor.Cursor.next', mock.Mock()) as mock_next: mock_next.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) resource = self.conn.db.resource.find().next() self.assertEqual(expected_first_sample_timestamp, resource['first_sample_timestamp']) def test_mongo_insert(self): raise_exc = [False, True] method = self.conn.db.meter.insert with mock.patch('pymongo.collection.Collection.insert', mock.Mock(return_value=method)) as mock_insert: mock_insert.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) mock_insert.__name__ = 'insert' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 14, 39), source='test-proxy') meters = list(self.conn.db.meter.find()) self.assertEqual(12, len(meters)) def test_mongo_find_and_modify(self): raise_exc = [False, True] method = self.conn.db.resource.find_and_modify with mock.patch('pymongo.collection.Collection.find_and_modify', mock.Mock()) as mock_fam: mock_fam.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) mock_fam.__name__ = 'find_and_modify' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 14, 39), source='test-proxy') data = self.conn.db.resource.find( {'last_sample_timestamp': datetime.datetime(2014, 10, 15, 14, 39)})[0]['source'] self.assertEqual('test-proxy', data) def test_mongo_update(self): raise_exc = [False, True] method = self.conn.db.resource.update with mock.patch('pymongo.collection.Collection.update', mock.Mock()) as mock_update: mock_update.side_effect = self.create_side_effect( method, pymongo.errors.AutoReconnect, raise_exc) 
mock_update.__name__ = 'update' self.create_and_store_sample( timestamp=datetime.datetime(2014, 10, 15, 17, 39), source='test-proxy-update') data = self.conn.db.resource.find( {'last_sample_timestamp': datetime.datetime(2014, 10, 15, 17, 39)})[0]['source'] self.assertEqual('test-proxy-update', data) @tests_db.run_with('mongodb') class MongoTimeToLiveTest(DBTestBase): def test_ensure_index(self): cfg.CONF.set_override('metering_time_to_live', 5, group='database') self.conn.upgrade() self.assertEqual(5, self.conn.db.resource.index_information() ['resource_ttl']['expireAfterSeconds']) self.assertEqual(5, self.conn.db.meter.index_information() ['meter_ttl']['expireAfterSeconds']) def test_modification_of_index(self): cfg.CONF.set_override('metering_time_to_live', 5, group='database') self.conn.upgrade() cfg.CONF.set_override('metering_time_to_live', 15, group='database') self.conn.upgrade() self.assertEqual(15, self.conn.db.resource.index_information() ['resource_ttl']['expireAfterSeconds']) self.assertEqual(15, self.conn.db.meter.index_information() ['meter_ttl']['expireAfterSeconds']) class TestRecordUnicodeSamples(DBTestBase): def prepare_data(self): self.msgs = [] self.msgs.append(self.create_and_store_sample( name=u'meter.accent\xe9\u0437', metadata={u"metadata_key\xe9\u0437": "test", u"metadata_key": u"test\xe9\u0437"}, )) def test_unicode_sample(self): f = storage.SampleFilter() results = list(self.conn.get_samples(f)) self.assertEqual(1, len(results)) expected = self.msgs[0] actual = results[0].as_dict() self.assertEqual(expected['counter_name'], actual['counter_name']) self.assertEqual(expected['resource_metadata'], actual['resource_metadata']) ceilometer-6.0.0/ceilometer/tests/functional/storage/__init__.py0000664000567000056710000000000012701406223026202 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/storage/test_impl_hbase.py0000664000567000056710000000774012701406223027627 0ustar jenkinsjenkins00000000000000# # Copyright 2012, 2013 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_hbase.py .. note:: In order to run the tests against real HBase server set the environment variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before running the tests. Make sure the Thrift server is running on that server. 
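For example, to point the tests at a Thrift server (the host name here is
illustrative; the ``hbase://host:port`` form matches the URL used by
ConnectionTest below)::

    export CEILOMETER_TEST_HBASE_URL=hbase://my-hbase-host:9090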
""" import mock try: import happybase # noqa except ImportError: import testtools.testcase raise testtools.testcase.TestSkipped("happybase is needed") from ceilometer.event.storage import impl_hbase as hbase_event from ceilometer.storage import impl_hbase as hbase from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db class ConnectionTest(tests_db.TestBase): @tests_db.run_with('hbase') def test_hbase_connection(self): class TestConn(object): def __init__(self, host, port): self.netloc = '%s:%s' % (host, port) def open(self): pass def get_connection_pool(conf): return TestConn(conf['host'], conf['port']) with mock.patch.object(hbase.Connection, '_get_connection_pool', side_effect=get_connection_pool): conn = hbase.Connection('hbase://test_hbase:9090') self.assertIsInstance(conn.conn_pool, TestConn) class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'statistics': {'groupby': False, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } actual_capabilities = hbase.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = hbase_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = hbase.Connection.get_storage_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) ceilometer-6.0.0/ceilometer/tests/functional/storage/test_impl_db2.py0000664000567000056710000001442212701406223027207 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2014. All rights reserved # # Authors: Ildiko Vancsa # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_db2.py .. note:: In order to run the tests against another MongoDB server set the environment variable CEILOMETER_TEST_DB2_URL to point to a DB2 server before running the tests. 
""" import bson import mock from oslo_config import cfg from oslo_utils import timeutils from ceilometer.event.storage import impl_db2 as impl_db2_event from ceilometer.storage import impl_db2 from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.tests import base as test_base class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } actual_capabilities = impl_db2.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_db2_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) def test_storage_capabilities(self): expected_capabilities = { 'storage': {'production_ready': True}, } actual_capabilities = impl_db2.Connection.get_storage_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) class ConnectionTest(test_base.BaseTestCase): @mock.patch.object(impl_db2.Connection, '_generate_random_str') @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(bson.objectid, 'ObjectId') def test_upgrade(self, meter_id, timestamp, mongo_connect, _generate_random_str): conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} _generate_random_str.return_value = 'wew' * 247 + 'x' * 3 conn_mock.ceilodb2.resource.index_information.return_value = {} mongo_connect.return_value = conn_mock meter_id.return_value = '54b8860d75bfe43b54e84ce7' timestamp.return_value = 'timestamp' cfg.CONF.set_override('db2nosql_resource_id_maxlen', 256, group='database') impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') resource_id = 'wew' * 247 + 'x' * 3 conn_mock.ceilodb2.resource.insert_one.assert_called_with( {'_id': resource_id, 'no_key': resource_id}) conn_mock.ceilodb2.meter.insert_one.assert_called_with( {'_id': '54b8860d75bfe43b54e84ce7', 'no_key': '54b8860d75bfe43b54e84ce7', 'timestamp': 'timestamp'}) @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(bson.objectid, 'ObjectId') def test_generate_random_str_with_less_config_len(self, objectid, mongo_connect): fake_str = '54b8860d75bfe43b54e84ce7' conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} mongo_connect.return_value = conn_mock objectid.return_value = fake_str cfg.CONF.set_override('db2nosql_resource_id_maxlen', 20, group='database') conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') rand_str = conn._generate_random_str(20) self.assertEqual(fake_str, rand_str) @mock.patch.object(pymongo_utils.ConnectionPool, 'connect') @mock.patch.object(bson.objectid, 'ObjectId') def test_generate_random_str_with_default_config_len(self, objectid, mongo_connect): fake_str = '54b8860d75bfe43b54e84ce7' conn_mock = mock.MagicMock() conn_mock.server_info.return_value = {} 
mongo_connect.return_value = conn_mock objectid.return_value = fake_str cfg.CONF.set_override('db2nosql_resource_id_maxlen', 512, group='database') conn = impl_db2.Connection('db2://user:pwd@localhost:27017/ceilodb2') rand_str = conn._generate_random_str(512) str_len = len(str(fake_str)) expect_str = fake_str * int(512 / str_len) + 'x' * (512 % str_len) self.assertEqual(expect_str, rand_str) ceilometer-6.0.0/ceilometer/tests/functional/storage/test_pymongo_base.py0000664000567000056710000001310012701406223030175 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests the mongodb and db2 common functionality.""" import copy import datetime import mock from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests import db as tests_db from ceilometer.tests.functional.storage import test_storage_scenarios @tests_db.run_with('mongodb', 'db2') class CompatibilityTest(test_storage_scenarios.DBTestBase): def prepare_data(self): def old_record_metering_data(self, data): received_timestamp = datetime.datetime.utcnow() self.db.resource.update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], # Current metadata being used and when it was # last updated. 'timestamp': data['timestamp'], 'received_timestamp': received_timestamp, 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], }, }, }, upsert=True, ) record = copy.copy(data) self.db.meter.insert(record) # Stub out with the old version DB schema, the one w/o 'counter_unit' with mock.patch.object(self.conn, 'record_metering_data', side_effect=old_record_metering_data): self.counters = [] c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5, 'user-id', 'project1', 'resource-id', timestamp=datetime.datetime(2012, 9, 25, 10, 30), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) self.counters.append(c) msg = utils.meter_message_from_counter( c, secret='not-so-secret') self.conn.record_metering_data(self.conn, msg) def test_counter_unit(self): meters = list(self.conn.get_meters()) self.assertEqual(1, len(meters)) # TODO(ananya) the same test should be done for the other databases @tests_db.run_with('mongodb', 'db2') class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase): def prepare_data(self): def old_record_metering_data(self, data): received_timestamp = datetime.datetime.utcnow() self.db.resource.update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], # Current metadata being used and when it was # last updated.
'timestamp': data['timestamp'], 'received_timestamp': received_timestamp, 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], }, }, }, upsert=True, ) record = copy.copy(data) self.db.meter.insert(record) # Stub out with the old version DB schema, the one w/o 'counter_unit' with mock.patch.object(self.conn, 'record_metering_data', side_effect=old_record_metering_data): self.counters = [] c = sample.Sample( 'volume.size', 'gauge', 'GiB', 5, None, None, None, timestamp=datetime.datetime(2012, 9, 25, 10, 30), resource_metadata={'display_name': 'test-volume', 'tag': 'self.counter', }, source='test', ) self.counters.append(c) msg = utils.meter_message_from_counter( c, secret='not-so-secret') self.conn.record_metering_data(self.conn, msg) def test_get_meters_by_user(self): meters = list(self.conn.get_meters(user='None')) self.assertEqual(1, len(meters)) def test_get_meters_by_resource(self): meters = list(self.conn.get_meters(resource='None')) self.assertEqual(1, len(meters)) def test_get_meters_by_project(self): meters = list(self.conn.get_meters(project='None')) self.assertEqual(1, len(meters)) ceilometer-6.0.0/ceilometer/tests/functional/hooks/0000775000567000056710000000000012701406364023570 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/hooks/post_test_hook.sh0000775000567000056710000000343512701406223027172 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. function generate_testr_results { if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz fi } export CEILOMETER_DIR="$BASE/new/ceilometer" # Go to the ceilometer dir cd $CEILOMETER_DIR if [[ -z "$STACK_USER" ]]; then export STACK_USER=stack fi sudo chown -R $STACK_USER:stack $CEILOMETER_DIR # Run tests echo "Running ceilometer functional test suite" set +e # NOTE(ityaptin) Expects a script parameter which contains the backend name CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -efunctional EXIT_CODE=$?
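# NOTE: errexit stays disabled until the test exit code has been captured in
# EXIT_CODE above; it is re-enabled below so that failures in log collection
# are fatal, while the hook still returns the captured test exit code.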
set -e # Collect and parse result generate_testr_results exit $EXIT_CODE ceilometer-6.0.0/ceilometer/tests/functional/api/0000775000567000056710000000000012701406364023216 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/api/__init__.py0000664000567000056710000001702212701406223025323 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests. """ from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_policy import opts import pecan import pecan.testing from ceilometer.api import rbac from ceilometer.tests import db as db_test_base OPT_GROUP_NAME = 'keystone_authtoken' cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token") cfg.CONF.import_group('api', 'ceilometer.api.controllers.v2.root') class FunctionalTest(db_test_base.TestBase): """Used for functional tests of Pecan controllers. Used in case when you need to test your literal application and its integration with the framework. """ PATH_PREFIX = '' def setUp(self): super(FunctionalTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) opts.set_defaults(self.CONF) self.CONF.set_override("auth_version", "v2.0", group=OPT_GROUP_NAME) self.CONF.set_override("policy_file", self.path_get('etc/ceilometer/policy.json'), group='oslo_policy') self.CONF.set_override('gnocchi_is_enabled', False, group='api') self.CONF.set_override('aodh_is_enabled', False, group='api') self.app = self._make_app() def _make_app(self, enable_acl=False): self.config = { 'app': { 'root': 'ceilometer.api.controllers.root.RootController', 'modules': ['ceilometer.api'], 'enable_acl': enable_acl, }, 'wsme': { 'debug': True, }, } return pecan.testing.load_test_app(self.config) def tearDown(self): super(FunctionalTest, self).tearDown() rbac.reset() pecan.set_config({}, overwrite=True) def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ return self.post_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
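A minimal call, as used by the query tests in this tree, looks like::

    data = self.post_json('/query/samples', params={})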
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, groupby=None, status=None, override_params=None, **params): """Sends simulated HTTP GET request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param groupby: list of fields to group by :param status: Expected status code of response :param override_params: literally encoded query param string :param params: content for wsgi.input of request """ q = q or [] groupby = groupby or [] full_path = self.PATH_PREFIX + path if override_params: all_params = override_params else: query_params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': [], } for query in q: for name in ['field', 'op', 'value', 'type']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) if groupby: all_params.update({'groupby': groupby}) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors, status=status) if not expect_errors: response = response.json return response ceilometer-6.0.0/ceilometer/tests/functional/api/v2/0000775000567000056710000000000012701406364023545 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py0000664000567000056710000003134512701406223032120 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests complex queries for samples """ import datetime from oslo_utils import timeutils from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 as tests_api admin_header = {"X-Roles": "admin", "X-Project-Id": "project-id1"} non_admin_header = {"X-Roles": "Member", "X-Project-Id": "project-id1"} class TestQueryMetersController(tests_api.FunctionalTest): def setUp(self): super(TestQueryMetersController, self).setUp() self.url = '/query/samples' for cnt in [ sample.Sample('meter.test', 'cumulative', '', 1, 'user-id1', 'project-id1', 'resource-id1', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server1', 'tag': 'self.sample', 'size': 456, 'util': 0.25, 'is_public': True}, source='test_source'), sample.Sample('meter.test', 'cumulative', '', 2, 'user-id2', 'project-id2', 'resource-id2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server2', 'tag': 'self.sample', 'size': 123, 'util': 0.75, 'is_public': True}, source='test_source'), sample.Sample('meter.test', 'cumulative', '', 3, 'user-id3', 'project-id3', 'resource-id3', timestamp=datetime.datetime(2012, 7, 2, 10, 42), resource_metadata={'display_name': 'test-server3', 'tag': 'self.sample', 'size': 789, 'util': 0.95, 'is_public': True}, source='test_source')]: msg = utils.meter_message_from_counter( cnt, self.CONF.publisher.telemetry_secret) self.conn.record_metering_data(msg) def test_query_fields_are_optional(self): data = self.post_json(self.url, params={}) self.assertEqual(3, len(data.json)) def test_query_with_isotime(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() data = self.post_json(self.url, params={"filter": '{">=": {"timestamp": "' + isotime + '"}}'}) self.assertEqual(2, len(data.json)) for sample_item in data.json: result_time = timeutils.parse_isotime(sample_item['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertTrue(result_time >= date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.url, params={}, headers=non_admin_header) for sample_item in data.json: self.assertEqual("project-id1", sample_item['project_id']) def test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id1"}}'}, headers=non_admin_header) for sample_item in data.json: self.assertEqual("project-id1", sample_item['project_id']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.url, params={}, headers=admin_header) self.assertEqual(3, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2", "project-id3"])) def 
test_admin_tenant_sees_every_project_with_complex_filter(self): filter = ('{"OR": ' + '[{"=": {"project_id": "project-id1"}}, ' + '{"=": {"project_id": "project-id2"}}]}') data = self.post_json(self.url, params={"filter": filter}, headers=admin_header) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_sees_every_project_with_in_filter(self): filter = ('{"In": ' + '{"project_id": ["project-id1", "project-id2"]}}') data = self.post_json(self.url, params={"filter": filter}, headers=admin_header) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_can_query_any_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, headers=admin_header) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], set(["project-id2"])) def test_query_with_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": "DESC"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["project-id3", "project-id2", "project-id1"], [s["project_id"] for s in data.json]) def test_query_with_field_name_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"project": "project-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], set(["project-id2"])) def test_query_with_field_name_resource(self): data = self.post_json(self.url, params={"filter": '{"=": {"resource": "resource-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['resource_id'], set(["resource-id2"])) def test_query_with_wrong_field_name(self): data = self.post_json(self.url, params={"filter": '{"=": {"unknown": "resource-id2"}}'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"is not valid under any of the given schemas", data.body) def test_query_with_wrong_json(self): data = self.post_json(self.url, params={"filter": '{"=": "resource": "resource-id2"}}'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Filter expression not valid", data.body) def test_query_with_field_name_user(self): data = self.post_json(self.url, params={"filter": '{"=": {"user": "user-id2"}}'}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['user_id'], set(["user-id2"])) def test_query_with_field_name_meter(self): data = self.post_json(self.url, params={"filter": '{"=": {"meter": "meter.test"}}'}) self.assertEqual(3, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['meter'], set(["meter.test"])) def test_query_with_lower_and_upper_case_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": "DeSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["project-id3", "project-id2", "project-id1"], [s["project_id"] for s in data.json]) def test_query_with_user_field_name_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"user": "aSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual(["user-id1", "user-id2", "user-id3"], [s["user_id"] for s in data.json]) def test_query_with_volume_field_name_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"volume": "deSc"}]'}) self.assertEqual(3, len(data.json)) self.assertEqual([3, 2, 1], 
[s["volume"] for s in data.json]) def test_query_with_missing_order_in_orderby(self): data = self.post_json(self.url, params={"orderby": '[{"project_id": ""}]'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body) def test_query_with_wrong_json_in_orderby(self): data = self.post_json(self.url, params={"orderby": '{"project_id": "desc"}]'}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Order-by expression not valid: Extra data", data.body) def test_filter_with_metadata(self): data = self.post_json(self.url, params={"filter": '{">=": {"metadata.util": 0.5}}'}) self.assertEqual(2, len(data.json)) for sample_item in data.json: self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5) def test_filter_with_negation(self): filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}' data = self.post_json(self.url, params={"filter": filter_expr}) self.assertEqual(1, len(data.json)) for sample_item in data.json: self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5) def test_limit_must_be_positive(self): data = self.post_json(self.url, params={"limit": 0}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit must be positive", data.body) def test_default_limit(self): self.CONF.set_override('default_api_return_limit', 1, group='api') data = self.post_json(self.url, params={}) self.assertEqual(1, len(data.json)) ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_event_scenarios.py0000664000567000056710000007015212701406224030345 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test event, event_type and trait retrieval.""" import datetime import uuid import webtest.app from ceilometer.event.storage import models from ceilometer.tests import db as tests_db from ceilometer.tests.functional.api import v2 USER_ID = uuid.uuid4().hex PROJ_ID = uuid.uuid4().hex HEADERS = {"X-Roles": "admin", "X-User-Id": USER_ID, "X-Project-Id": PROJ_ID} class EventTestBase(v2.FunctionalTest): def setUp(self): super(EventTestBase, self).setUp() self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for event_type in ['Foo', 'Bar', 'Zoo']: trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] # Message ID for test will be 'base'. So, message ID for the first # event will be '0', the second '100', and so on. # trait_time in first event will be equal to self.trait_time # (datetime.datetime(2013, 12, 31, 5, 0)), next will add 1 day, so # second will be (datetime.datetime(2014, 01, 01, 5, 0)) and so on. 
event_models.append( models.Event(message_id=str(base), event_type=event_type, generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) base += 100 self.trait_time += datetime.timedelta(days=1) self.event_conn.record_events(event_models) class TestEventTypeAPI(EventTestBase): PATH = '/event_types' def test_event_types(self): data = self.get_json(self.PATH, headers=HEADERS) for event_type in ['Foo', 'Bar', 'Zoo']: self.assertIn(event_type, data) class TestTraitAPI(EventTestBase): PATH = '/event_types/%s/traits' def test_get_traits_for_event(self): path = self.PATH % "Foo" data = self.get_json(path, headers=HEADERS) self.assertEqual(4, len(data)) def test_get_event_invalid_path(self): data = self.get_json('/event_types/trait_A/', headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_traits_for_non_existent_event(self): path = self.PATH % "NO_SUCH_EVENT_TYPE" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_event(self): path = (self.PATH % "Foo") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_A", data[0]['name']) path = (self.PATH % "Foo") + "/trait_B" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_B", data[0]['name']) self.assertEqual("1", data[0]['value']) path = (self.PATH % "Foo") + "/trait_D" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_D", data[0]['name']) self.assertEqual((self.trait_time - datetime.timedelta(days=3)). isoformat(), data[0]['value']) def test_get_trait_data_for_non_existent_event(self): path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_non_existent_trait(self): path = (self.PATH % "Foo") + "/no_such_trait" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) class TestEventAPI(EventTestBase): PATH = '/events' def test_get_events(self): data = self.get_json(self.PATH, headers=HEADERS) self.assertEqual(3, len(data)) # We expect to get native UTC generated time back trait_time = self.s_time for event in data: expected_generated = trait_time.isoformat() self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo']) self.assertEqual(4, len(event['traits'])) self.assertEqual({'status': {'nested': 'started'}}, event['raw']), self.assertEqual(expected_generated, event['generated']) for trait_name in ['trait_A', 'trait_B', 'trait_C', 'trait_D']: self.assertIn(trait_name, map(lambda x: x['name'], event['traits'])) trait_time += datetime.timedelta(days=1) def test_get_event_by_message_id(self): event = self.get_json(self.PATH + "/100", headers=HEADERS) expected_traits = [{'name': 'trait_A', 'type': 'string', 'value': 'my_Bar_text'}, {'name': 'trait_B', 'type': 'integer', 'value': '101'}, {'name': 'trait_C', 'type': 'float', 'value': '100.123456'}, {'name': 'trait_D', 'type': 'datetime', 'value': '2014-01-01T05:00:00'}] self.assertEqual('100', event['message_id']) self.assertEqual('Bar', event['event_type']) self.assertEqual('2014-01-01T05:00:00', event['generated']) self.assertEqual(expected_traits, event['traits']) def test_get_event_by_message_id_no_such_id(self): data = self.get_json(self.PATH + "/DNE", headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_events_filter_event_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 
'event_type', 'value': 'Foo'}]) self.assertEqual(1, len(data)) def test_get_events_filter_trait_no_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_empty_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': ''}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("The data type whats-up is not supported. The " "supported data type list is: [\'integer\', " "\'float\', \'string\', \'datetime\']", resp.json['error_message']['faultstring']) def test_get_events_filter_operator_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'op': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("Operator whats-up is not supported. The " "supported operators are: (\'lt\', \'le\', " "\'eq\', \'ne\', \'ge\', \'gt\')", resp.json['error_message']['faultstring']) def test_get_events_filter_text_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_int_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}]) self.assertEqual(1, len(data)) self.assertEqual('Bar', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B'] self.assertEqual(1, len(traits)) self.assertEqual('integer', traits[0]['type']) self.assertEqual('101', traits[0]['value']) def test_get_events_filter_float_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float'}]) self.assertEqual(1, len(data)) self.assertEqual('Zoo', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C'] self.assertEqual(1, len(traits)) self.assertEqual('float', traits[0]['type']) self.assertEqual('200.123456', traits[0]['value']) def test_get_events_filter_datetime_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime'}]) self.assertEqual(1, len(data)) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D'] self.assertEqual(1, len(traits)) self.assertEqual('datetime', traits[0]['type']) self.assertEqual('2014-01-01T05:00:00', traits[0]['value']) def test_get_events_multiple_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_multiple_filters_no_matches(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_multiple_filters_same_field_different_values(self): data = self.get_json(self.PATH, headers=HEADERS, 
q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}, {'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_not_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[]) self.assertEqual(3, len(data)) def test_get_events_filter_op_string(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'lt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 'string', 'op': 'le'}]) self.assertEqual(3, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'gt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 'string', 'op': 'ge'}]) self.assertEqual(1, len(data)) def test_get_events_filter_op_integer(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_float(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_datetime(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value':
'2014-01-02T05:00:00', 'type': 'datetime', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-02T05:00:00', 'type': 'datetime', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_wrong_op(self): self.assertRaises(webtest.app.AppError, self.get_json, self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'el'}]) class AclRestrictedEventTestBase(v2.FunctionalTest): def setUp(self): super(AclRestrictedEventTestBase, self).setUp() self.admin_user_id = uuid.uuid4().hex self.admin_proj_id = uuid.uuid4().hex self.user_id = uuid.uuid4().hex self.proj_id = uuid.uuid4().hex self._generate_models() def _generate_models(self): event_models = [] self.s_time = datetime.datetime(2013, 12, 31, 5, 0) event_models.append( models.Event(message_id='1', event_type='empty_ev', generated=self.s_time, traits=[models.Trait('random', models.Trait.TEXT_TYPE, 'blah')], raw={})) event_models.append( models.Event(message_id='2', event_type='admin_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.admin_proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.admin_user_id)], raw={})) event_models.append( models.Event(message_id='3', event_type='user_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.user_id)], raw={})) self.event_conn.record_events(event_models) def test_non_admin_access(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(1, len(data)) self.assertEqual('user_ev', data[0]['event_type']) def test_non_admin_access_single(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/3', headers=a_headers) self.assertEqual('user_ev', data['event_type']) def test_non_admin_access_incorrect_user(self): a_headers = {"X-Roles": "member", "X-User-Id": 'blah', "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_incorrect_proj(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": 'blah'} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_single_invalid(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/1', headers=a_headers, expect_errors=True) self.assertEqual(404, data.status_int) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(2, len(data)) self.assertEqual(set(['empty_ev', 
'admin_ev']), set(ev['event_type'] for ev in data)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_trait_filter(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'random', 'value': 'blah', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) self.assertEqual('empty_ev', data[0]['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_single(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events/1', headers=a_headers) self.assertEqual('empty_ev', data['event_type']) data = self.get_json('/events/2', headers=a_headers) self.assertEqual('admin_ev', data['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es', 'db2') def test_admin_access_trait_filter_no_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'user_id', 'value': self.user_id, 'type': 'string', 'op': 'eq'}]) self.assertEqual(0, len(data)) class EventRestrictionTestBase(v2.FunctionalTest): def setUp(self): super(EventRestrictionTestBase, self).setUp() self.CONF.set_override('default_api_return_limit', 10, group='api') self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for i in range(20): trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_text"), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] event_models.append( models.Event(message_id=str(uuid.uuid4()), event_type='foo.bar', generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) self.trait_time += datetime.timedelta(seconds=1) self.event_conn.record_events(event_models) class TestEventRestriction(EventRestrictionTestBase): def test_get_limit(self): data = self.get_json('/events?limit=1', headers=HEADERS) self.assertEqual(1, len(data)) def test_get_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/events?limit=-2', headers=HEADERS) def test_get_limit_bigger(self): data = self.get_json('/events?limit=100', headers=HEADERS) self.assertEqual(20, len(data)) def test_get_default_limit(self): data = self.get_json('/events', headers=HEADERS) self.assertEqual(10, len(data)) ceilometer-6.0.0/ceilometer/tests/functional/api/v2/__init__.py0000664000567000056710000000131212701406223025645 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
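# Shared base class for the v2 API functional tests that follow: every
# request issued through get_json()/post_json() is rooted at the /v2
# path prefix declared below.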
from ceilometer.tests.functional import api class FunctionalTest(api.FunctionalTest): PATH_PREFIX = '/v2' ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py0000664000567000056710000010305612701406223031555 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing meters. """ import base64 import datetime from oslo_serialization import jsonutils import six import webtest.app from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 class TestListEmptyMeters(v2.FunctionalTest): def test_empty(self): data = self.get_json('/meters') self.assertEqual([], data) class TestValidateUserInput(v2.FunctionalTest): def test_list_meters_query_float_metadata(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': '0.7.5', 'type': 'float'}]) self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': 'abacaba', 'type': 'boolean'}]) self.assertRaises(webtest.app.AppError, self.get_json, '/meters/meter.test', q=[{'field': 'metadata.util', 'op': 'eq', 'value': '45.765', 'type': 'integer'}]) class TestListMetersRestriction(v2.FunctionalTest): def setUp(self): super(TestListMetersRestriction, self).setUp() self.CONF.set_override('default_api_return_limit', 3, group='api') for x in range(5): for i in range(5): s = sample.Sample( 'volume.size%s' % x, 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id', timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + datetime.timedelta(seconds=i)), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_meter_limit(self): data = self.get_json('/meters?limit=1') self.assertEqual(1, len(data)) def test_meter_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters?limit=-2') def test_meter_limit_bigger(self): data = self.get_json('/meters?limit=42') self.assertEqual(5, len(data)) def test_meter_default_limit(self): data = self.get_json('/meters') self.assertEqual(3, len(data)) def test_old_sample_limit(self): data = self.get_json('/meters/volume.size0?limit=1') self.assertEqual(1, len(data)) def test_old_sample_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/meters/volume.size0?limit=-2') def test_old_sample_limit_bigger(self): data = self.get_json('/meters/volume.size0?limit=42') self.assertEqual(5, len(data)) def test_old_sample_default_limit(self): data = self.get_json('/meters/volume.size0') self.assertEqual(3, len(data)) def test_sample_limit(self): data = self.get_json('/samples?limit=1') self.assertEqual(1, len(data)) def test_sample_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, 
'/samples?limit=-2') def test_sample_limit_bigger(self): data = self.get_json('/samples?limit=42') self.assertEqual(25, len(data)) def test_sample_default_limit(self): data = self.get_json('/samples') self.assertEqual(3, len(data)) class TestListMeters(v2.FunctionalTest): def setUp(self): super(TestListMeters, self).setUp() self.messages = [] for cnt in [ sample.Sample( 'meter.test', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', 'size': 123, 'util': 0.75, 'is_public': True}, source='test_source'), sample.Sample( 'meter.test', 'cumulative', '', 3, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 11, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', 'size': 0, 'util': 0.47, 'is_public': False}, source='test_source'), sample.Sample( 'meter.mine', 'gauge', '', 1, 'user-id', 'project-id', 'resource-id2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', 'size': 456, 'util': 0.64, 'is_public': False}, source='test_source'), sample.Sample( 'meter.test', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id3', timestamp=datetime.datetime(2012, 7, 2, 10, 42), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample3', 'size': 0, 'util': 0.75, 'is_public': False}, source='test_source'), sample.Sample( 'meter.test.new', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample3', 'size': 0, 'util': 0.75, 'is_public': False}, source='test_source'), sample.Sample( 'meter.mine', 'gauge', '', 1, 'user-id4', 'project-id2', 'resource-id4', timestamp=datetime.datetime(2012, 7, 2, 10, 43), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample4', 'properties': { 'prop_1': 'prop_value', 'prop_2': {'sub_prop_1': 'sub_prop_value'}, 'prop.3': {'$sub_prop.2': 'sub_prop_value2'} }, 'size': 0, 'util': 0.58, 'is_public': True}, source='test_source1'), sample.Sample( u'meter.accent\xe9\u0437', 'gauge', '', 1, 'user-id4', 'project-id2', 'resource-id4', timestamp=datetime.datetime(2014, 7, 2, 10, 43), resource_metadata={}, source='test_source1')]: msg = utils.meter_message_from_counter( cnt, self.CONF.publisher.telemetry_secret) self.messages.append(msg) self.conn.record_metering_data(msg) def test_list_meters(self): data = self.get_json('/meters') self.assertEqual(6, len(data)) self.assertEqual(set(['resource-id', 'resource-id2', 'resource-id3', 'resource-id4']), set(r['resource_id'] for r in data)) self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', u'meter.accent\xe9\u0437']), set(r['name'] for r in data)) self.assertEqual(set(['test_source', 'test_source1']), set(r['source'] for r in data)) def test_list_unique_meters(self): data = self.get_json('/meters?unique=True') self.assertEqual(4, len(data)) self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', u'meter.accent\xe9\u0437']), set(r['name'] for r in data)) def test_meters_query_with_timestamp(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() resp = self.get_json('/meters', q=[{'field': 'timestamp', 'op': 'gt', 'value': isotime}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual('Unknown argument: "timestamp": ' 'not valid for this resource', 
jsonutils.loads(resp.body)['error_message'] ['faultstring']) def test_list_samples(self): data = self.get_json('/samples') self.assertEqual(7, len(data)) def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): resp = self.get_json('/samples', q=[{'field': 'non_valid_field_name', 'op': 'gt', 'value': 3}], expect_errors=True) resp_string = jsonutils.loads(resp.body) fault_string = resp_string['error_message']['faultstring'] msg = ('Unknown argument: "non_valid_field_name"' ': unrecognized field in query: ' '[ # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.tests.functional.api import v2 as tests_api class TestCapabilitiesController(tests_api.FunctionalTest): def setUp(self): super(TestCapabilitiesController, self).setUp() self.url = '/capabilities' def test_capabilities(self): data = self.get_json(self.url) # check that capabilities data contains both 'api' and 'storage' fields self.assertIsNotNone(data) self.assertNotEqual({}, data) self.assertIn('api', data) self.assertIn('storage', data) ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py0000664000567000056710000004614512701406224032276 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test listing resources. 
""" import datetime import json import six import webtest.app from ceilometer.publisher import utils from ceilometer import sample from ceilometer.tests.functional.api import v2 class TestListResources(v2.FunctionalTest): def test_empty(self): data = self.get_json('/resources') self.assertEqual([], data) def _verify_resource_timestamps(self, res, first, last): # Bounds need not be tight (see ceilometer bug #1288372) self.assertIn('first_sample_timestamp', res) self.assertTrue(first.isoformat() >= res['first_sample_timestamp']) self.assertIn('last_sample_timestamp', res) self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) def test_instance_no_metadata(self): timestamp = datetime.datetime(2012, 7, 2, 10, 40) sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamp, resource_metadata=None, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') self.assertEqual(1, len(data)) self._verify_resource_timestamps(data[0], timestamp, timestamp) def test_instances(self): timestamps = { 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), } sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamps['resource-id'], resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-alternate', timestamp=timestamps['resource-id-alternate'], resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources') self.assertEqual(2, len(data)) for res in data: timestamp = timestamps.get(res['resource_id']) self._verify_resource_timestamps(res, timestamp, timestamp) def test_instance_multiple_samples(self): timestamps = [ datetime.datetime(2012, 7, 2, 10, 41), datetime.datetime(2012, 7, 2, 10, 42), datetime.datetime(2012, 7, 2, 10, 40), ] for timestamp in timestamps: datapoint = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=timestamp, resource_metadata={'display_name': 'test-server', 'tag': 'self.sample-%s' % timestamp, }, source='test', ) msg = utils.meter_message_from_counter( datapoint, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') self.assertEqual(1, len(data)) self._verify_resource_timestamps(data[0], timestamps[-1], timestamps[1]) def test_instances_one(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, 
source='test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources/resource-id') self.assertEqual('resource-id', data['resource_id']) def test_with_source(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'source', 'value': 'test_list_resources', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) sources = [r['source'] for r in data] self.assertEqual(['test_list_resources'], sources) def test_with_invalid_resource_id(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id-1', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-2', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='test_list_resources', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) resp1 = self.get_json('/resources/resource-id-1') self.assertEqual("resource-id-1", resp1["resource_id"]) resp2 = self.get_json('/resources/resource-id-2') self.assertEqual("resource-id-2", resp2["resource_id"]) resp3 = self.get_json('/resources/resource-id-3', expect_errors=True) self.assertEqual(404, resp3.status_code) json_data = resp3.body if six.PY3: json_data = json_data.decode('utf-8') self.assertEqual("Resource resource-id-3 Not Found", json.loads(json_data)['error_message'] ['faultstring']) def test_with_user(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'user_id', 'value': 'user-id', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) 
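    # The surrounding tests filter /resources on one field at a time. A
    # hypothetical companion test (not part of the original suite) could
    # combine constraints, assuming the same AND semantics the event
    # query tests above exercise:
    def test_with_user_and_project(self):
        s = sample.Sample(
            'instance', 'cumulative', '', 1, 'user-id', 'project-id',
            'resource-id',
            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
            resource_metadata={'display_name': 'test-server'},
            source='test',
        )
        msg = utils.meter_message_from_counter(
            s, self.CONF.publisher.telemetry_secret,
        )
        self.conn.record_metering_data(msg)
        # Both constraints must match the same resource for it to appear.
        data = self.get_json('/resources',
                             q=[{'field': 'user_id', 'value': 'user-id'},
                                {'field': 'project_id',
                                 'value': 'project-id'}])
        self.assertEqual(['resource-id'],
                         [r['resource_id'] for r in data])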
def test_with_project(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) sample2 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample2', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample2, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', q=[{'field': 'project_id', 'value': 'project-id', }]) ids = [r['resource_id'] for r in data] self.assertEqual(['resource-id'], ids) def test_with_user_non_admin(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', headers={"X-Roles": "Member", "X-Project-Id": "project-id2"}) ids = set(r['resource_id'] for r in data) self.assertEqual(set(['resource-id-alternate']), ids) def test_with_user_wrong_tenant(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id2', 'project-id2', 'resource-id-alternate', timestamp=datetime.datetime(2012, 7, 2, 10, 41), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample1', }, source='not-test', ) msg2 = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg2) data = self.get_json('/resources', headers={"X-Roles": "Member", "X-Project-Id": "project-wrong"}) ids = set(r['resource_id'] for r in data) self.assertEqual(set(), ids) def test_metadata(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', 'dict_properties': {'key.$1': {'$key': 'val'}}, 'not_ignored_list': ['returned'], }, source='test', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') metadata = data[0]['metadata'] self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), (u'display_name', u'test-server'), (u'not_ignored_list', u"['returned']"), (u'tag', u'self.sample')], list(sorted(six.iteritems(metadata)))) def test_resource_meter_links(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources') links = data[0]['links'] self.assertEqual(2, len(links)) self.assertEqual('self', links[0]['rel']) self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') in links[0]['href']) 
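        # Each resource carries two links: the 'self' link back to the
        # resource, checked above, and one link per meter name that is
        # pre-filtered on the resource id, checked next.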
self.assertEqual('instance', links[1]['rel']) self.assertTrue((self.PATH_PREFIX + '/meters/instance?' 'q.field=resource_id&q.value=resource-id') in links[1]['href']) def test_resource_skip_meter_links(self): sample1 = sample.Sample( 'instance', 'cumulative', '', 1, 'user-id', 'project-id', 'resource-id', timestamp=datetime.datetime(2012, 7, 2, 10, 40), resource_metadata={'display_name': 'test-server', 'tag': 'self.sample', }, source='test_list_resources', ) msg = utils.meter_message_from_counter( sample1, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) data = self.get_json('/resources?meter_links=0') links = data[0]['links'] self.assertEqual(len(links), 1) self.assertEqual(links[0]['rel'], 'self') self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') in links[0]['href']) class TestListResourcesRestriction(v2.FunctionalTest): def setUp(self): super(TestListResourcesRestriction, self).setUp() self.CONF.set_override('default_api_return_limit', 10, group='api') for i in range(20): s = sample.Sample( 'volume.size', 'gauge', 'GiB', 5 + i, 'user-id', 'project1', 'resource-id%s' % i, timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + datetime.timedelta(seconds=i)), resource_metadata={'display_name': 'test-volume', 'tag': 'self.sample', }, source='source1', ) msg = utils.meter_message_from_counter( s, self.CONF.publisher.telemetry_secret, ) self.conn.record_metering_data(msg) def test_resource_limit(self): data = self.get_json('/resources?limit=1') self.assertEqual(1, len(data)) def test_resource_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/resources?limit=-2') def test_resource_limit_bigger(self): data = self.get_json('/resources?limit=42') self.assertEqual(20, len(data)) def test_resource_default_limit(self): data = self.get_json('/resources') self.assertEqual(10, len(data)) ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_api_upgrade.py0000664000567000056710000001445612701406223027442 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
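# These tests pin down the v2 API's upgrade-path contract: with Gnocchi
# handling metering, the meters/samples/resources endpoints answer
# 410 Gone, while the alarm endpoints answer 307 Temporary Redirect
# with a Location header pointing at Aodh. A client is expected to
# follow that redirect itself, e.g. (hypothetical sketch, not part of
# this module; the endpoint URL is illustrative):
#
#     import requests
#     resp = requests.get('http://ceilometer:8777/v2/alarms',
#                         allow_redirects=False)
#     if resp.status_code == 307:
#         resp = requests.get(resp.headers['Location'])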
import mock from oslo_utils import fileutils from oslotest import mockpatch import six from ceilometer.tests.functional.api import v2 class TestAPIUpgradePath(v2.FunctionalTest): def _make_app(self): content = ('{"default": ""}') if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') self.CONF.set_override("policy_file", self.tempfile, group='oslo_policy') return super(TestAPIUpgradePath, self)._make_app() def _setup_osloconfig_options(self): self.CONF.set_override('gnocchi_is_enabled', True, group='api') self.CONF.set_override('aodh_is_enabled', True, group='api') self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', group='api') def _setup_keystone_mock(self): self.CONF.set_override('gnocchi_is_enabled', None, group='api') self.CONF.set_override('aodh_is_enabled', None, group='api') self.CONF.set_override('aodh_url', None, group='api') self.CONF.set_override('meter_dispatchers', ['database']) self.ks = mock.Mock() self.catalog = (self.ks.session.auth.get_access. return_value.service_catalog) self.catalog.url_for.side_effect = self._url_for self.useFixture(mockpatch.Patch( 'ceilometer.keystone_client.get_client', return_value=self.ks)) @staticmethod def _url_for(service_type=None): if service_type == 'metric': return 'http://gnocchi/' elif service_type == 'alarming': return 'http://alarm-endpoint:8008/' def _do_test_gnocchi_enabled_without_database_backend(self): self.CONF.set_override('meter_dispatchers', 'gnocchi') for endpoint in ['meters', 'samples', 'resources']: response = self.app.get(self.PATH_PREFIX + '/' + endpoint, status=410) self.assertIn(b'Gnocchi API', response.body) headers_events = {"X-Roles": "admin", "X-User-Id": "user1", "X-Project-Id": "project1"} for endpoint in ['events', 'event_types']: self.app.get(self.PATH_PREFIX + '/' + endpoint, headers=headers_events, status=200) response = self.post_json('/query/samples', params={ "filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3 }, status=410) self.assertIn(b'Gnocchi API', response.body) sample_params = { "counter_type": "gauge", "counter_name": "fake_counter", "resource_id": "fake_resource_id", "counter_unit": "fake_unit", "counter_volume": "1" } self.post_json('/meters/fake_counter', params=[sample_params], status=201) response = self.post_json('/meters/fake_counter?direct=1', params=[sample_params], status=400) self.assertIn(b'direct option cannot be true when Gnocchi is enabled', response.body) def _do_test_alarm_redirect(self): response = self.app.get(self.PATH_PREFIX + '/alarms', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms", response.headers['Location']) response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", response.headers['Location']) response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', expect_errors=True) self.assertEqual(307, response.status_code) self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", response.headers['Location']) response = self.post_json('/query/alarms', params={ "filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3 }, status=307) self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", response.headers['Location']) def test_gnocchi_enabled_without_database_backend_keystone(self): 
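        # With the explicit config options cleared, the API falls back
        # to keystone catalog discovery: _setup_keystone_mock wires
        # url_for() so that service_type 'metric' resolves to the
        # Gnocchi endpoint and 'alarming' to the Aodh endpoint, and the
        # assertion below verifies the catalog was actually consulted.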
self._setup_keystone_mock() self._do_test_gnocchi_enabled_without_database_backend() self.catalog.url_for.assert_has_calls([ mock.call(service_type="alarming"), mock.call(service_type="metric")], any_order=True) def test_gnocchi_enabled_without_database_backend_configoptions(self): self._setup_osloconfig_options() self._do_test_gnocchi_enabled_without_database_backend() def test_alarm_redirect_keystone(self): self._setup_keystone_mock() self._do_test_alarm_redirect() self.assertEqual([mock.call(service_type="alarming")], self.catalog.url_for.mock_calls) def test_alarm_redirect_configoptions(self): self._setup_osloconfig_options() self._do_test_alarm_redirect() ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py0000664000567000056710000001714612701406223035211 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test computing duration by resource. """ import datetime import mock from oslo_utils import timeutils from ceilometer.storage import models from ceilometer.tests.functional.api import v2 class TestComputeDurationByResource(v2.FunctionalTest): def setUp(self): super(TestComputeDurationByResource, self).setUp() # Create events relative to the range and pretend # that the intervening events exist.
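        # Timeline used throughout this class: early1/early2 fall the
        # day before the queried window, start..end brackets
        # 2012-08-28, and late1/late2 fall the day after, so each test
        # can patch get_meter_statistics to simulate samples that miss,
        # straddle, or sit inside the window.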
self.early1 = datetime.datetime(2012, 8, 27, 7, 0) self.early2 = datetime.datetime(2012, 8, 27, 17, 0) self.start = datetime.datetime(2012, 8, 28, 0, 0) self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) self.end = datetime.datetime(2012, 8, 28, 23, 59) self.late1 = datetime.datetime(2012, 8, 29, 9, 0) self.late2 = datetime.datetime(2012, 8, 29, 19, 0) def _patch_get_interval(self, start, end): def get_interval(sample_filter, period, groupby, aggregate): self.assertIsNotNone(sample_filter.start_timestamp) self.assertIsNotNone(sample_filter.end_timestamp) if (sample_filter.start_timestamp > end or sample_filter.end_timestamp < start): return [] duration_start = max(sample_filter.start_timestamp, start) duration_end = min(sample_filter.end_timestamp, end) duration = timeutils.delta_seconds(duration_start, duration_end) return [ models.Statistics( unit='', min=0, max=0, avg=0, sum=0, count=0, period=None, period_start=None, period_end=None, duration=duration, duration_start=duration_start, duration_end=duration_end, groupby=None, ) ] return mock.patch.object(type(self.conn), 'get_meter_statistics', side_effect=get_interval) def _invoke_api(self): return self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'ge', 'value': self.start.isoformat()}, {'field': 'timestamp', 'op': 'le', 'value': self.end.isoformat()}, {'field': 'search_offset', 'value': 10}]) def test_before_range(self): with self._patch_get_interval(self.early1, self.early2): data = self._invoke_api() self.assertEqual([], data) def _assert_times_match(self, actual, expected): if actual: actual = timeutils.parse_isotime(actual) actual = actual.replace(tzinfo=None) self.assertEqual(expected, actual) def test_overlap_range_start(self): with self._patch_get_interval(self.early1, self.middle1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.start) self._assert_times_match(data[0]['duration_end'], self.middle1) self.assertEqual(8 * 60 * 60, data[0]['duration']) def test_within_range(self): with self._patch_get_interval(self.middle1, self.middle2): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle1) self._assert_times_match(data[0]['duration_end'], self.middle2) self.assertEqual(10 * 60 * 60, data[0]['duration']) def test_within_range_zero_duration(self): with self._patch_get_interval(self.middle1, self.middle1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle1) self._assert_times_match(data[0]['duration_end'], self.middle1) self.assertEqual(0, data[0]['duration']) def test_overlap_range_end(self): with self._patch_get_interval(self.middle2, self.late1): data = self._invoke_api() self._assert_times_match(data[0]['duration_start'], self.middle2) self._assert_times_match(data[0]['duration_end'], self.end) self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) def test_after_range(self): with self._patch_get_interval(self.late1, self.late2): data = self._invoke_api() self.assertEqual([], data) def test_without_end_timestamp(self): statistics = [ models.Statistics( unit=None, count=0, min=None, max=None, avg=None, duration=None, duration_start=self.late1, duration_end=self.late2, sum=0, period=None, period_start=None, period_end=None, groupby=None, ) ] with mock.patch.object(type(self.conn), 'get_meter_statistics', return_value=statistics): data = self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'ge', 'value': 
self.late1.isoformat()}, {'field': 'resource_id', 'value': 'resource-id'}, {'field': 'search_offset', 'value': 10}]) self._assert_times_match(data[0]['duration_start'], self.late1) self._assert_times_match(data[0]['duration_end'], self.late2) def test_without_start_timestamp(self): statistics = [ models.Statistics( unit=None, count=0, min=None, max=None, avg=None, duration=None, duration_start=self.early1, duration_end=self.early2, sum=0, period=None, period_start=None, period_end=None, groupby=None, ) ] with mock.patch.object(type(self.conn), 'get_meter_statistics', return_value=statistics): data = self.get_json('/meters/instance/statistics', q=[{'field': 'timestamp', 'op': 'le', 'value': self.early2.isoformat()}, {'field': 'resource_id', 'value': 'resource-id'}, {'field': 'search_offset', 'value': 10}]) self._assert_times_match(data[0]['duration_start'], self.early1) self._assert_times_match(data[0]['duration_end'], self.early2) ceilometer-6.0.0/ceilometer/tests/functional/api/v2/test_app.py0000664000567000056710000001057712701406223025742 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test basic ceilometer-api app """ from ceilometer.tests.functional.api import v2 class TestPecanApp(v2.FunctionalTest): def test_pecan_extension_guessing_unset(self): # check Pecan does not assume .jpg is an extension response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') self.assertEqual('application/json', response.content_type) class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' en_US_translated_error = 'en-US translated error' def _fake_translate(self, message, user_locale): if user_locale is None: return self.no_lang_translated_error else: return self.en_US_translated_error def test_json_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml;q=0.8, \ application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "text/html,*/*"} ) self.assertEqual(404, response.status_int) 
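            # Even an Accept header preferring text/html falls back to
            # JSON: the error middleware only renders JSON or XML
            # faults, and JSON is the default whenever XML is not
            # explicitly preferred.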
self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) def test_xml_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json;q=0.8 \ ,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) ceilometer-6.0.0/ceilometer/meter/0000775000567000056710000000000012701406364020255 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/meter/data/0000775000567000056710000000000012701406364021166 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/meter/data/meters.yaml0000664000567000056710000005306712701406224023357 0ustar jenkinsjenkins00000000000000--- metric: # Image - name: "image.size" event_type: - "image.upload" - "image.delete" - "image.update" type: "gauge" unit: B volume: $.payload.size resource_id: $.payload.id project_id: $.payload.owner - name: "image.download" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id user_id: $.payload.receiver_user_id project_id: $.payload.receiver_tenant_id - name: "image.serve" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id project_id: $.payload.owner_id # MagnetoDB - name: 'magnetodb.table.index.count' type: 'gauge' unit: 'index' event_type: 'magnetodb.table.create.end' volume: $.payload.index_count resource_id: $.payload.table_uuid user_id: $._context_user - name: 'volume.size' event_type: - 'volume.exists' - 'volume.create.*' - 'volume.delete.*' - 'volume.resize.*' - 'volume.attach.*' - 'volume.detach.*' - 'volume.update.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.volume_id - name: 'snapshot.size' event_type: - 'snapshot.exists' - 'snapshot.create.*' - 'snapshot.delete.*' type: 'gauge' unit: 'GB' volume: $.payload.volume_size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.snapshot_id # Magnum - name: $.payload.metrics.[*].name event_type: 'magnum.bay.metrics.*' type: 'gauge' unit: $.payload.metrics.[*].unit volume: $.payload.metrics.[*].value user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.resource_id lookup: ['name', 'unit', 'volume'] # Swift - name: $.payload.measurements.[*].metric.[*].name event_type: 'objectstore.http.request' type: 'delta' unit: $.payload.measurements.[*].metric.[*].unit volume: $.payload.measurements.[*].result resource_id: $.payload.target.id user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id lookup: ['name', 'unit', 'volume'] - name: 'memory' event_type: 'compute.instance.*' type: 'gauge' unit: 'MB' volume: $.payload.memory_mb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id - name: 'vcpus' event_type: 'compute.instance.*' type: 'gauge' unit: 'vcpu' volume: $.payload.vcpus user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id - name: 'compute.instance.booting.time' event_type: 'compute.instance.create.end' type: 'gauge' 
unit: 'sec' volume: fields: [$.payload.created_at, $.payload.launched_at] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id - name: 'disk.root.size' event_type: 'compute.instance.*' type: 'gauge' unit: 'GB' volume: $.payload.root_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id - name: 'disk.ephemeral.size' event_type: 'compute.instance.*' type: 'gauge' unit: 'GB' volume: $.payload.ephemeral_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id - name: 'bandwidth' event_type: 'l3.meter' type: 'delta' unit: 'B' volume: $.payload.bytes project_id: $.payload.tenant_id resource_id: $.payload.label_id - name: 'compute.node.cpu.frequency' event_type: 'compute.metrics.update' type: 'gauge' unit: 'MHz' volume: $.payload.metrics[?(@.name='cpu.frequency')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.frequency')].source - name: 'compute.node.cpu.user.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.user.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.time')].source - name: 'compute.node.cpu.kernel.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - name: 'compute.node.cpu.idle.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.idle.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.time')].source - name: 'compute.node.cpu.iowait.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - name: 'compute.node.cpu.kernel.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - name: 'compute.node.cpu.idle.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: 
$.payload.metrics[?(@.name='cpu.idle.percent')].source - name: 'compute.node.cpu.user.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.percent')].source - name: 'compute.node.cpu.iowait.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - name: 'compute.node.cpu.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.percent')].source # DNS - name: 'dns.domain.exists' event_type: 'dns.domain.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.id user_id: $._context_user metadata: status: $.payload.status pool_id: $.payload.pool_id host: $.publisher_id # Trove - name: 'trove.instance.exists' event_type: 'trove.instance.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_id: $.payload.user_id metadata: nova_instance_id: $.payload.nova_instance_id state: $.payload.state service_id: $.payload.service_id instance_type: $.payload.instance_type instance_type_id: $.payload.instance_type_id # NOTE: non-metric meters are generally events/existence meters # These are DEPRECATED in current release and expected to be # REMOVED in the next upcoming release. 
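# Each definition above is declarative: 'name', 'type' and 'unit'
# describe the sample to emit, 'event_type' selects the matching
# notifications, and 'volume', 'resource_id', 'user_id' and
# 'project_id' are JSONPath expressions evaluated against the
# notification body ('lookup' fans a single notification out into
# several samples, and 'plugin' post-processes values, e.g. the
# 'timedelta' plugin used earlier in this file). A minimal
# hypothetical entry, for illustration only, would look like:
#
#   - name: 'example.meter'
#     event_type: 'example.notification'
#     type: 'gauge'
#     unit: 'B'
#     volume: $.payload.bytes
#     resource_id: $.payload.resource_id
#     project_id: $.payload.project_id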
# # Image - name: "image" event_type: - "image.upload" - "image.update" - "image.delete" type: "gauge" unit: 'image' volume: 1 resource_id: $.payload.id project_id: $.payload.owner - name: "image.upload" event_type: - "image.upload" type: "gauge" unit: 'image' volume: 1 resource_id: $.payload.id project_id: $.payload.owner - name: "image.delete" event_type: - "image.delete" type: "gauge" unit: 'image' volume: 1 resource_id: $.payload.id project_id: $.payload.owner - name: "image.update" event_type: - "image.update" type: "gauge" unit: 'image' volume: 1 resource_id: $.payload.id project_id: $.payload.owner # Orchestration - name: 'stack.create' event_type: - 'orchestration.stack.create.end' type: 'delta' unit: 'stack' volume: 1 user_id: _context_trustor_user_id project_id: $.payload.tenant_id resource_id: $.payload.stack_identity - name: 'stack.update' event_type: - 'orchestration.stack.update.end' type: 'delta' unit: 'stack' volume: 1 user_id: _context_trustor_user_id project_id: $.payload.tenant_id resource_id: $.payload.stack_identity - name: 'stack.delete' event_type: - 'orchestration.stack.delete.end' type: 'delta' unit: 'stack' volume: 1 user_id: _context_trustor_user_id project_id: $.payload.tenant_id resource_id: $.payload.stack_identity - name: 'stack.resume' event_type: - 'orchestration.stack.resume.end' type: 'delta' unit: 'stack' volume: 1 user_id: _context_trustor_user_id project_id: $.payload.tenant_id resource_id: $.payload.stack_identity - name: 'stack.suspend' event_type: - 'orchestration.stack.suspend.end' type: 'delta' unit: 'stack' volume: 1 user_id: _context_trustor_user_id project_id: $.payload.tenant_id resource_id: $.payload.stack_identity # MagnetoDB - name: 'magnetodb.table.create' type: 'gauge' unit: 'table' volume: 1 event_type: 'magnetodb.table.create.end' resource_id: $.payload.table_uuid user_id: _context_user project_id: _context_tenant - name: 'magnetodb.table.delete' type: 'gauge' unit: 'table' volume: 1 event_type: 'magnetodb.table.delete.end' resource_id: $.payload.table_uuid user_id: _context_user project_id: _context_tenant # Volume - name: 'volume' type: 'gauge' unit: 'volume' volume: 1 event_type: - 'volume.exists' - 'volume.create.*' - 'volume.delete.*' - 'volume.resize.*' - 'volume.attach.*' - 'volume.detach.*' - 'volume.update.*' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.exists' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.exists' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.create.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.create.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.create.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.create.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.delete.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.delete.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.delete.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.delete.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.update.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.update.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: 
$.payload.tenant_id - name: 'volume.update.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.update.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.resize.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.resize.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.resize.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.resize.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.attach.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.attach.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.attach.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.attach.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.detach.end' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.detach.end' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'volume.detach.start' type: 'delta' unit: 'volume' volume: 1 event_type: - 'volume.detach.start' resource_id: $.payload.volume_id user_id: $.payload.user_id project_id: $.payload.tenant_id # Volume Snapshot - name: 'snapshot' type: 'gauge' unit: 'snapshot' volume: 1 event_type: - 'snapshot.exists' - 'snapshot.create.*' - 'snapshot.delete.*' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'snapshot.exists' type: 'delta' unit: 'snapshot' volume: 1 event_type: - 'snapshot.exists' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'snapshot.create.start' type: 'delta' unit: 'snapshot' volume: 1 event_type: - 'snapshot.create.start' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'snapshot.create.end' type: 'delta' unit: 'snapshot' volume: 1 event_type: - 'snapshot.create.end' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'snapshot.delete.start' type: 'delta' unit: 'snapshot' volume: 1 event_type: - 'snapshot.delete.start' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id - name: 'snapshot.delete.end' type: 'delta' unit: 'snapshot' volume: 1 event_type: - 'snapshot.delete.end' resource_id: $.payload.snapshot_id user_id: $.payload.user_id project_id: $.payload.tenant_id # Sahara - name: 'cluster.create' type: 'delta' unit: 'cluster' volume: 1 event_type: - 'sahara.cluster.create' resource_id: $.payload.cluster_id project_id: $.payload.project_id - name: 'cluster.update' type: 'delta' unit: 'cluster' volume: 1 event_type: - 'sahara.cluster.update' resource_id: $.payload.cluster_id project_id: $.payload.project_id - name: 'cluster.delete' type: 'delta' unit: 'cluster' volume: 1 event_type: - 'sahara.cluster.delete' resource_id: $.payload.cluster_id project_id: $.payload.project_id # Identity - name: 'identity.user.created' type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.user.created' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.user.updated' type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.user.updated' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.user.deleted' 
type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.user.deleted' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.group.created' type: 'delta' unit: 'group' volume: 1 event_type: - 'identity.group.created' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.group.updated' type: 'delta' unit: 'group' volume: 1 event_type: - 'identity.group.updated' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.group.deleted' type: 'delta' unit: 'group' volume: 1 event_type: - 'identity.group.deleted' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.project.created' type: 'delta' unit: 'project' volume: 1 event_type: - 'identity.project.created' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.project.updated' type: 'delta' unit: 'project' volume: 1 event_type: - 'identity.project.updated' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.project.deleted' type: 'delta' unit: 'project' volume: 1 event_type: - 'identity.project.deleted' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.role.created' type: 'delta' unit: 'role' volume: 1 event_type: - 'identity.role.created' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.role.updated' type: 'delta' unit: 'role' volume: 1 event_type: - 'identity.role.updated' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.role.deleted' type: 'delta' unit: 'role' volume: 1 event_type: - 'identity.role.deleted' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.role_assignment.created' type: 'delta' unit: 'role_assignment' volume: 1 event_type: - 'identity.role_assignment.created' resource_id: $.payload.role user_id: $.payload.initiator.id - name: 'identity.role_assignment.deleted' type: 'delta' unit: 'role_assignment' volume: 1 event_type: - 'identity.role_assignment.deleted' resource_id: $.payload.role user_id: $.payload.initiator.id - name: 'identity.authenticate.success' type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.authenticate' resource_id: $.payload.initiator.id user_id: $.payload.initiator.id - name: 'identity.authenticate.pending' type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.authenticate' resource_id: $.payload.initiator.id user_id: $.payload.initiator.id - name: 'identity.authenticate.failure' type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.authenticate' resource_id: $.payload.initiator.id user_id: $.payload.initiator.id - name: 'identity.trust.created' type: 'delta' unit: 'trust' volume: 1 event_type: - 'identity.OS-TRUST:trust.created' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'identity.trust.deleted' type: 'delta' unit: 'trust' volume: 1 event_type: - 'identity.OS-TRUST:trust.deleted' resource_id: $.payload.resource_info user_id: $.payload.initiator.id - name: 'storage.api.request' type: 'delta' unit: 'request' volume: 1 event_type: - 'objectstore.http.request' resource_id: $.payload.target.id user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id - name: '$.payload.name' event_type: 'profiler.*' type: 'gauge' unit: 'trace' volume: 1 user_id: $.payload.user_id project_id: $.payload.project_id resource_id: '"profiler-" + $.payload.base_id' 
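# --- Editor's note (illustrative, not part of the shipped file): every field
# above holding a $.-expression is evaluated as JSONPath against the incoming
# notification body. A minimal sketch of that lookup, assuming the
# jsonpath_rw_ext library these expressions are written for:
#
#     import jsonpath_rw_ext
#     body = {'payload': {'metrics': [{'name': 'cpu.percent',
#                                      'value': 0.42}]}}
#     expr = jsonpath_rw_ext.parse(
#         "$.payload.metrics[?(@.name='cpu.percent')].value")
#     print([m.value for m in expr.find(body)])  # expected: [0.42]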
ceilometer-6.0.0/ceilometer/meter/__init__.py0000664000567000056710000000000012701406223022346 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/meter/notifications.py0000664000567000056710000002263412701406224023502 0ustar jenkinsjenkins00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

import pkg_resources
import six

from debtcollector import moves
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from stevedore import extension

from ceilometer.agent import plugin_base
from ceilometer import declarative
from ceilometer.i18n import _LE, _LW
from ceilometer import sample
from ceilometer import utils

OPTS = [
    cfg.StrOpt('meter_definitions_cfg_file',
               default="meters.yaml",
               help="Configuration file for defining meter notifications."
               ),
]

cfg.CONF.register_opts(OPTS, group='meter')
cfg.CONF.import_opt('disable_non_metric_meters', 'ceilometer.notification',
                    group='notification')

LOG = log.getLogger(__name__)

MeterDefinitionException = moves.moved_class(declarative.DefinitionException,
                                             'MeterDefinitionException',
                                             __name__,
                                             version=6.0,
                                             removal_version="?")


class MeterDefinition(object):

    SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp",
                         "user_id", "project_id", "resource_id"]

    REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume',
                       'resource_id']

    def __init__(self, definition_cfg, plugin_manager):
        self.cfg = definition_cfg
        missing = [field for field in self.REQUIRED_FIELDS
                   if not self.cfg.get(field)]
        if missing:
            raise declarative.DefinitionException(
                _LE("Required fields %s not specified") % missing, self.cfg)

        self._event_type = self.cfg.get('event_type')
        if isinstance(self._event_type, six.string_types):
            self._event_type = [self._event_type]

        if ('type' not in self.cfg.get('lookup', []) and
                self.cfg['type'] not in sample.TYPES):
            raise declarative.DefinitionException(
                _LE("Invalid type %s specified") % self.cfg['type'], self.cfg)

        self._fallback_user_id = declarative.Definition(
            'user_id', "_context_user_id|_context_user", plugin_manager)
        self._fallback_project_id = declarative.Definition(
            'project_id', "_context_tenant_id|_context_tenant",
            plugin_manager)
        self._attributes = {}
        self._metadata_attributes = {}

        for name in self.SAMPLE_ATTRIBUTES:
            attr_cfg = self.cfg.get(name)
            if attr_cfg:
                self._attributes[name] = declarative.Definition(
                    name, attr_cfg, plugin_manager)
        metadata = self.cfg.get('metadata', {})
        for name in metadata:
            self._metadata_attributes[name] = declarative.Definition(
                name, metadata[name], plugin_manager)

        # List of fields we expect when multiple meters are in the payload
        self.lookup = self.cfg.get('lookup')
        if isinstance(self.lookup, six.string_types):
            self.lookup = [self.lookup]

    def match_type(self, meter_name):
        for t in self._event_type:
            if utils.match(meter_name, t):
                return True

    def to_samples(self, message, all_values=False):
        # Sample defaults
        sample = {
            'name': self.cfg["name"],
            'type': self.cfg["type"],
            'unit': self.cfg["unit"],
            'volume': None,
            'timestamp': None,
            'user_id': self._fallback_user_id.parse(message),
            'project_id': self._fallback_project_id.parse(message),
            'resource_id': None,
            'message': message,
            'metadata': {},
        }
        for name, parser in self._metadata_attributes.items():
            value = parser.parse(message)
            if value:
                sample['metadata'][name] = value

        # NOTE(sileht): We expect multiple samples in the payload
        # so put each attribute into a list
        if self.lookup:
            for name in sample:
                sample[name] = [sample[name]]

        for name in self.SAMPLE_ATTRIBUTES:
            parser = self._attributes.get(name)
            if parser is not None:
                value = parser.parse(message, bool(self.lookup))
                # NOTE(sileht): If we expect multiple samples, some
                # attributes are overridden even if we don't get any
                # result. Also note that in this case value is always
                # a list.
                if ((not self.lookup and value is not None) or
                        (self.lookup and ((name in self.lookup + ["name"])
                                          or value))):
                    sample[name] = value

        if self.lookup:
            nb_samples = len(sample['name'])
            # skip if no meters in payload
            if nb_samples <= 0:
                raise StopIteration

            attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"]

            samples_values = []
            for name in attributes:
                values = sample.get(name)
                nb_values = len(values)
                if nb_values == nb_samples:
                    samples_values.append(values)
                elif nb_values == 1 and name not in self.lookup:
                    samples_values.append(itertools.cycle(values))
                else:
                    nb = (0 if nb_values == 1 and values[0] is None
                          else nb_values)
                    LOG.warning('Only %(nb)d fetched meters contain '
                                '"%(name)s" field instead of %(total)d.' %
                                dict(name=name, nb=nb, total=nb_samples))
                    raise StopIteration

            # NOTE(sileht): Transform the sample with multiple values per
            # attribute into multiple samples with one value per attribute.
            for values in zip(*samples_values):
                yield dict((attributes[idx], value)
                           for idx, value in enumerate(values))
        else:
            yield sample


class ProcessMeterNotifications(plugin_base.NotificationBase):

    event_types = []

    def __init__(self, manager):
        super(ProcessMeterNotifications, self).__init__(manager)
        self.definitions = self._load_definitions()

    @staticmethod
    def _load_definitions():
        plugin_manager = extension.ExtensionManager(
            namespace='ceilometer.event.trait_plugin')
        meters_cfg = declarative.load_definitions(
            {}, cfg.CONF.meter.meter_definitions_cfg_file,
            pkg_resources.resource_filename(__name__, "data/meters.yaml"))

        definitions = {}
        for meter_cfg in reversed(meters_cfg['metric']):
            if meter_cfg.get('name') in definitions:
                # skip duplicate meters
                LOG.warning(_LW("Skipping duplicate meter definition %s")
                            % meter_cfg)
                continue
            if (meter_cfg.get('volume') != 1
                    or not cfg.CONF.notification.disable_non_metric_meters):
                try:
                    md = MeterDefinition(meter_cfg, plugin_manager)
                except declarative.DefinitionException as me:
                    errmsg = (_LE("Error loading meter definition : %(err)s")
                              % dict(err=six.text_type(me)))
                    LOG.error(errmsg)
                else:
                    definitions[meter_cfg['name']] = md
        return definitions.values()

    def get_targets(self, conf):
        """Return a sequence of oslo_messaging.Target

        It defines the exchange and topics to be connected for this
        plugin.

        :param conf: Configuration.
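
        For illustration (editor addition): with the default exchange and
        topic options this returns targets equivalent to

            oslo_messaging.Target(exchange='nova', topic='notifications')
            oslo_messaging.Target(exchange='cinder', topic='notifications')

        and so on for each monitored exchange.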
#TODO(prad): This should be defined in the notification agent """ targets = [] exchanges = [ conf.nova_control_exchange, conf.cinder_control_exchange, conf.glance_control_exchange, conf.neutron_control_exchange, conf.heat_control_exchange, conf.keystone_control_exchange, conf.sahara_control_exchange, conf.trove_control_exchange, conf.zaqar_control_exchange, conf.swift_control_exchange, conf.magnetodb_control_exchange, conf.ceilometer_control_exchange, conf.magnum_control_exchange, conf.dns_control_exchange, ] for exchange in exchanges: targets.extend(oslo_messaging.Target(topic=topic, exchange=exchange) for topic in self.get_notification_topics(conf)) return targets def process_notification(self, notification_body): for d in self.definitions: if d.match_type(notification_body['event_type']): for s in d.to_samples(notification_body): yield sample.Sample.from_notification(**s) ceilometer-6.0.0/ceilometer/ipmi/0000775000567000056710000000000012701406364020077 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/notifications/0000775000567000056710000000000012701406364022750 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/notifications/__init__.py0000664000567000056710000000000012701406223025041 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/notifications/ironic.py0000664000567000056710000001307712701406223024607 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing hardware sensor data sample messages from notification events. """ from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from ceilometer.agent import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('ironic_exchange', default='ironic', help='Exchange name for Ironic notifications.'), ] cfg.CONF.register_opts(OPTS) # Map unit name to SI UNIT_MAP = { 'Watts': 'W', 'Volts': 'V', } def validate_reading(data): """Some sensors read "Disabled".""" return data != 'Disabled' def transform_id(data): return data.lower().replace(' ', '_') def parse_reading(data): try: volume, unit = data.split(' ', 1) unit = unit.rsplit(' ', 1)[-1] return float(volume), UNIT_MAP.get(unit, unit) except ValueError: raise InvalidSensorData('unable to parse sensor reading: %s' % data) class InvalidSensorData(ValueError): pass class SensorNotification(plugin_base.NotificationBase): """A generic class for extracting samples from sensor data notifications. A notification message can contain multiple samples from multiple sensors, all with the same basic structure: the volume for the sample is found as part of the value of a 'Sensor Reading' key. The unit is in the same value. Subclasses exist solely to allow flexibility with stevedore configuration. 
""" event_types = ['hardware.ipmi.*'] metric = None def get_targets(self, conf): """oslo.messaging.TargetS for this plugin.""" return [messaging.Target(topic=topic, exchange=conf.ironic_exchange) for topic in self.get_notification_topics(conf)] def _get_sample(self, message): try: return (payload for _, payload in message['payload'][self.metric].items()) except KeyError: return [] @staticmethod def _package_payload(message, payload): # NOTE(chdent): How much of the payload should we keep? payload['node'] = message['payload']['node_uuid'] info = {'publisher_id': message['publisher_id'], 'timestamp': message['payload']['timestamp'], 'event_type': message['payload']['event_type'], 'user_id': message['payload'].get('user_id'), 'project_id': message['payload'].get('project_id'), 'payload': payload} return info def process_notification(self, message): """Read and process a notification. The guts of a message are in dict value of a 'payload' key which then itself has a payload key containing a dict of multiple sensor readings. If expected keys in the payload are missing or values are not in the expected form for transformations, KeyError and ValueError are caught and the current sensor payload is skipped. """ payloads = self._get_sample(message['payload']) for payload in payloads: try: # Provide a fallback resource_id in case parts are missing. resource_id = 'missing id' try: resource_id = '%(nodeid)s-%(sensorid)s' % { 'nodeid': message['payload']['node_uuid'], 'sensorid': transform_id(payload['Sensor ID']) } except KeyError as exc: raise InvalidSensorData('missing key in payload: %s' % exc) info = self._package_payload(message, payload) try: sensor_reading = info['payload']['Sensor Reading'] except KeyError as exc: raise InvalidSensorData( "missing 'Sensor Reading' in payload" ) if validate_reading(sensor_reading): volume, unit = parse_reading(sensor_reading) yield sample.Sample.from_notification( name='hardware.ipmi.%s' % self.metric.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, resource_id=resource_id, message=info, user_id=info['user_id'], project_id=info['project_id']) except InvalidSensorData as exc: LOG.warning( 'invalid sensor data for %(resource)s: %(error)s' % dict(resource=resource_id, error=exc) ) continue class TemperatureSensorNotification(SensorNotification): metric = 'Temperature' class CurrentSensorNotification(SensorNotification): metric = 'Current' class FanSensorNotification(SensorNotification): metric = 'Fan' class VoltageSensorNotification(SensorNotification): metric = 'Voltage' ceilometer-6.0.0/ceilometer/ipmi/platform/0000775000567000056710000000000012701406364021723 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/platform/ipmitool.py0000664000567000056710000001061212701406223024123 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utils to run ipmitool for data collection""" from oslo_concurrency import processutils from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer import utils # Following 2 functions are copied from ironic project to handle ipmitool's # sensor data output. Need code clean and sharing in future. # Check ironic/drivers/modules/ipmitool.py def _get_sensor_type(sensor_data_dict): # Have only three sensor type name IDs: 'Sensor Type (Analog)' # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)' for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)', 'Sensor Type (Threshold)'): try: return sensor_data_dict[key].split(' ', 1)[0] except KeyError: continue raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "unknown sensor type")) def _process_sensor(sensor_data): sensor_data_fields = sensor_data.split('\n') sensor_data_dict = {} for field in sensor_data_fields: if not field: continue kv_value = field.split(':') if len(kv_value) != 2: continue sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip() return sensor_data_dict def _translate_output(output): """Translate the return value into JSON dict :param output: output of the execution of IPMI command(sensor reading) """ sensors_data_dict = {} sensors_data_array = output.split('\n\n') for sensor_data in sensors_data_array: sensor_data_dict = _process_sensor(sensor_data) if not sensor_data_dict: continue sensor_type = _get_sensor_type(sensor_data_dict) # ignore the sensors which have no current 'Sensor Reading' data sensor_id = sensor_data_dict['Sensor ID'] if 'Sensor Reading' in sensor_data_dict: sensors_data_dict.setdefault(sensor_type, {})[sensor_id] = sensor_data_dict # get nothing, no valid sensor data if not sensors_data_dict: raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "No data retrieved from given input")) return sensors_data_dict def _parse_output(output, template): """Parse the return value of IPMI command into dict :param output: output of the execution of IPMI command :param template: a dict that contains the expected items of IPMI command and its length. """ ret = {} index = 0 if not (output and template): return ret if "translate" in template: ret = _translate_output(output) else: output_list = output.strip().replace('\n', '').split(' ') if sum(template.values()) != len(output_list): raise ipmiexcept.IPMIException(_("ipmitool output " "length mismatch")) for item in template.items(): index_end = index + item[1] update_value = output_list[index: index_end] ret[item[0]] = update_value index = index_end return ret def execute_ipmi_cmd(template=None): """Decorator for the execution of IPMI command. It parses the output of IPMI command into dictionary. """ template = template or [] def _execute_ipmi_cmd(f): def _execute(self, **kwargs): args = ['ipmitool'] command = f(self, **kwargs) args.extend(command.split(" ")) try: (out, __) = utils.execute(*args, run_as_root=True) except processutils.ProcessExecutionError: raise ipmiexcept.IPMIException(_("running ipmitool failure")) return _parse_output(out, template) return _execute return _execute_ipmi_cmd ceilometer-6.0.0/ceilometer/ipmi/platform/__init__.py0000664000567000056710000000000012701406223024014 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/platform/intel_node_manager.py0000664000567000056710000003237712701406224026116 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Node manager engine to collect power and temperature of compute node.

Intel Node Manager Technology enables the datacenter IT to monitor and
control actual server power, thermal and compute utilization behavior
through industry defined standard IPMI. This file provides the Node Manager
engine to get simple system power and temperature data based on ipmitool.
"""

import binascii
import collections
import tempfile
import time

from oslo_config import cfg
import six

from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as nmexcept
from ceilometer.ipmi.platform import ipmitool

OPTS = [
    cfg.IntOpt('node_manager_init_retry',
               default=3,
               help='Number of retries upon Intel Node '
                    'Manager initialization failure')
]

CONF = cfg.CONF
CONF.register_opts(OPTS, group='ipmi')

IPMICMD = {"sdr_dump": "sdr dump",
           "sdr_info": "sdr info",
           "sensor_dump": "sdr -v"}
IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01",
              "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00",
              "init_sensor_agent": "raw 0x0a 0x2c 0x01",
              "init_complete": "raw 0x0a 0x2c 0x00",
              "init_sensor_agent_status": "raw 0x0a 0x2c 0x00",
              "read_power_all":
                  "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00",
              "read_inlet_temperature":
                  "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00",
              "read_outlet_temperature":
                  "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00",
              "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00",
              "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05",
              "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"}

MANUFACTURER_ID_INTEL = ['57', '01', '00']
INTEL_PREFIX = '5701000d01'

# The template dicts are made according to the spec: each records the expected
# length of every item and is used to parse the output of an IPMI command.
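# --- Editor's illustrative sketch (an addition, not part of the original
# module): it mimics what _parse_output() in ceilometer.ipmi.platform.ipmitool
# does with the templates defined below; the raw reply bytes are hypothetical.
def _demo_parse_nm_version():
    import collections
    template = collections.OrderedDict(
        [('Manufacturer_ID', 3), ('NM_Version', 1), ('IPMI_Version', 1),
         ('Patch_Version', 1), ('Firmware_Revision_Major', 1),
         ('Firmware_Revision_Minor', 1)])
    output_list = '57 01 00 03 05 02 01 0a'.split(' ')
    ret, index = {}, 0
    for name, length in template.items():
        ret[name] = output_list[index:index + length]
        index += length
    # ret['NM_Version'] == ['03']; _hex(['03']) below returns 3, which the
    # _node_manager_get_version() docstring maps to Intel NM 2.0.
    return ret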
ONE_RETURN_TEMPLATE = {"ret": 1} BMC_INFO_TEMPLATE = collections.OrderedDict() BMC_INFO_TEMPLATE['Device_ID'] = 1 BMC_INFO_TEMPLATE['Device_Revision'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1 BMC_INFO_TEMPLATE['IPMI_Version'] = 1 BMC_INFO_TEMPLATE['Additional_Device_support'] = 1 BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3 BMC_INFO_TEMPLATE['Product_ID'] = 2 BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4 NM_STATISTICS_TEMPLATE = collections.OrderedDict() NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3 NM_STATISTICS_TEMPLATE['Current_value'] = 2 NM_STATISTICS_TEMPLATE['Minimum_value'] = 2 NM_STATISTICS_TEMPLATE['Maximum_value'] = 2 NM_STATISTICS_TEMPLATE['Average_value'] = 2 NM_STATISTICS_TEMPLATE['Time_stamp'] = 4 NM_STATISTICS_TEMPLATE['Report_period'] = 4 NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1 NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict() NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1 NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Additinal_Device_support'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1 NM_GET_VERSION_TEMPLATE = collections.OrderedDict() NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_VERSION_TEMPLATE['NM_Version'] = 1 NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1 NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1 NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict() NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8 NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict() NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2 def _hex(list=None): """Format the return value in list into hex.""" list = list or [] if list: list.reverse() return int(''.join(list), 16) return 0 class NodeManager(object): """The python implementation of Intel Node Manager engine using ipmitool The class implements the engine to read power and temperature of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. """ _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super(NodeManager, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): # As singleton, only the 1st NM pollster would trigger its # initialization. nm_version indicate init result, and is shared # across all pollsters self._inited = True self.nm_version = 0 self.channel_slave = '' self.nm_version = self.check_node_manager() @staticmethod def _parse_slave_and_channel(file_path): """Parse the dumped file to get slave address and channel number. :param file_path: file path of dumped SDR file. :return: slave address and channel number of target device. 
""" ret = None prefix = INTEL_PREFIX # According to Intel Node Manager spec, section 4.5, for Intel NM # discovery OEM SDR records are type C0h. It contains manufacture ID # and OEM data in the record body. # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6 # is Intel NM device slave address and channel number/sensor owner LUN. with open(file_path, 'rb') as bin_fp: for line in bin_fp.readlines(): if line: data_str = binascii.hexlify(line) if six.PY3: data_str = data_str.decode('ascii') if prefix in data_str: oem_id_index = data_str.index(prefix) ret = data_str[oem_id_index + len(prefix): oem_id_index + len(prefix) + 4] # Byte 5 is slave address. [7:4] from byte 6 is channel # number, so just pick ret[2] here. ret = (ret[0:2], ret[2]) break return ret @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE) def get_device_id(self): """IPMI command GET_DEVICE_ID.""" return IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent(self): """Run initialization agent.""" return IPMIRAWCMD["init_sensor_agent"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent_process(self): """Check the status of initialization agent.""" return IPMIRAWCMD["init_sensor_agent_status"] @ipmitool.execute_ipmi_cmd() def _dump_sdr_file(self, data_file=""): """Dump SDR into a file.""" return IPMICMD["sdr_dump"] + " " + data_file @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE) def _node_manager_get_device_id(self): """GET_DEVICE_ID command in Intel Node Manager Different from IPMI command GET_DEVICE_ID, it contains more information of Intel Node Manager. """ return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE) def _node_manager_get_version(self): """GET_NODE_MANAGER_VERSION command in Intel Node Manager Byte 4 of the response: 01h - Intel NM 1.0 02h - Intel NM 1.5 03h - Intel NM 2.0 04h - Intel NM 2.5 05h - Intel NM 3.0 """ return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_power_all(self): """Get the power consumption of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_inlet_temperature(self): """Get the inlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_outlet_temperature(self): """Get the outlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_airflow(self): """Get the volumetric airflow of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow'] @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE) def _read_cups_utilization(self): """Get the average CUPS utilization of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization'] @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE) def _read_cups_index(self): """Get the CUPS Index of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index'] def read_power_all(self): return self._read_power_all() if self.nm_version > 0 else {} def read_inlet_temperature(self): return self._read_inlet_temperature() if self.nm_version > 0 else {} def read_outlet_temperature(self): return self._read_outlet_temperature() if self.nm_version >= 5 else 
{} def read_airflow(self): # only available after NM 3.0 return self._read_airflow() if self.nm_version >= 5 else {} def read_cups_utilization(self): # only available after NM 3.0 return self._read_cups_utilization() if self.nm_version >= 5 else {} def read_cups_index(self): # only available after NM 3.0 return self._read_cups_index() if self.nm_version >= 5 else {} def init_node_manager(self): if self._init_sensor_agent_process()['ret'] == ['01']: return # Run sensor initialization agent for i in range(CONF.ipmi.node_manager_init_retry): self._init_sensor_agent() time.sleep(1) if self._init_sensor_agent_process()['ret'] == ['01']: return raise nmexcept.NodeManagerException(_('Node Manager init failed')) def discover_slave_channel(self): """Discover target slave address and channel number.""" file_path = tempfile.mkstemp()[1] self._dump_sdr_file(data_file=file_path) ret = self._parse_slave_and_channel(file_path) slave_address = ''.join(['0x', ret[0]]) channel = ''.join(['0x', ret[1]]) # String of channel and slave_address self.channel_slave = '-b ' + channel + ' -t ' + slave_address def node_manager_version(self): """Intel Node Manager capability checking This function is used to detect if compute node support Intel Node Manager(return version number) or not(return -1) and parse out the slave address and channel number of node manager. """ self.manufacturer_id = self.get_device_id()['Manufacturer_ID'] if MANUFACTURER_ID_INTEL != self.manufacturer_id: # If the manufacturer is not Intel, just set False and return. return 0 self.discover_slave_channel() support = self._node_manager_get_device_id()['Implemented_firmware'] # According to Intel Node Manager spec, return value of GET_DEVICE_ID, # bits 3 to 0 shows if Intel NM implemented or not. if int(support[0], 16) & 0xf == 0: return 0 return _hex(self._node_manager_get_version()['NM_Version']) def check_node_manager(self): """Intel Node Manager init and check This function is used to initialize Intel Node Manager and check the capability without throwing exception. It's safe to call it on non-NodeManager platform. """ try: self.init_node_manager() nm_version = self.node_manager_version() except (nmexcept.NodeManagerException, nmexcept.IPMIException): return 0 return nm_version ceilometer-6.0.0/ceilometer/ipmi/platform/exception.py0000664000567000056710000000132612701406223024267 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NodeManagerException(Exception): pass class IPMIException(Exception): pass ceilometer-6.0.0/ceilometer/ipmi/platform/ipmi_sensor.py0000664000567000056710000000765612701406223024634 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """IPMI sensor to collect various sensor data of compute node""" from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmitool IPMICMD = {"sdr_dump": "sdr dump", "sdr_info": "sdr info", "sensor_dump": "sdr -v", "sensor_dump_temperature": "sdr -v type Temperature", "sensor_dump_current": "sdr -v type Current", "sensor_dump_fan": "sdr -v type Fan", "sensor_dump_voltage": "sdr -v type Voltage"} # Requires translation of output into dict DICT_TRANSLATE_TEMPLATE = {"translate": 1} class IPMISensor(object): """The python implementation of IPMI sensor using ipmitool The class implements the IPMI sensor to get various sensor data of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. """ _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super(IPMISensor, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): self.ipmi_support = False self._inited = True self.ipmi_support = self.check_ipmi() @ipmitool.execute_ipmi_cmd() def _get_sdr_info(self): """Get the SDR info.""" return IPMICMD['sdr_info'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_all(self): """Get the sensor data for type.""" return IPMICMD['sensor_dump'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_temperature(self): """Get the sensor data for Temperature.""" return IPMICMD['sensor_dump_temperature'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_voltage(self): """Get the sensor data for Voltage.""" return IPMICMD['sensor_dump_voltage'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_current(self): """Get the sensor data for Current.""" return IPMICMD['sensor_dump_current'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_fan(self): """Get the sensor data for Fan.""" return IPMICMD['sensor_dump_fan'] def read_sensor_any(self, sensor_type=''): """Get the sensor data for type.""" if not self.ipmi_support: return {} mapping = {'': self._read_sensor_all, 'Temperature': self._read_sensor_temperature, 'Fan': self._read_sensor_fan, 'Voltage': self._read_sensor_voltage, 'Current': self._read_sensor_current} try: return mapping[sensor_type]() except KeyError: raise ipmiexcept.IPMIException(_('Wrong sensor type')) def check_ipmi(self): """IPMI capability checking This function is used to detect if compute node is IPMI capable platform. Just run a simple IPMI command to get SDR info for check. """ try: self._get_sdr_info() except ipmiexcept.IPMIException: return False return True ceilometer-6.0.0/ceilometer/ipmi/pollsters/0000775000567000056710000000000012701406364022126 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/ipmi/pollsters/__init__.py0000664000567000056710000000175012701406223024234 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Pollsters for IPMI and Intel Node Manager """ from oslo_config import cfg OPTS = [ cfg.IntOpt('polling_retry', default=3, help='Tolerance of IPMI/NM polling failures ' 'before disable this pollster. ' 'Negative indicates retrying forever.') ] cfg.CONF.register_opts(OPTS, group='ipmi') ceilometer-6.0.0/ceilometer/ipmi/pollsters/node.py0000664000567000056710000001234112701406224023421 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer import sample CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', group='ipmi') LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): def setup_environment(self): super(_Base, self).setup_environment() self.nodemanager = node_manager.NodeManager() self.polling_failures = 0 # Do not load this extension if no NM support if self.nodemanager.nm_version == 0: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' def get_value(self, stats): """Get value from statistics.""" return node_manager._hex(stats["Current_value"]) @abc.abstractmethod def read_data(self, cache): """Return data sample for IPMI.""" def get_samples(self, manager, cache, resources): # Only one resource for Node Manager pollster try: stats = self.read_data(cache) except nmexcept.IPMIException: self.polling_failures += 1 LOG.warning(_('Polling %(name)s failed for %(cnt)s times!') % ({'name': self.NAME, 'cnt': self.polling_failures})) if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.NAME) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 metadata = { 'node': CONF.host } if stats: data = self.get_value(stats) yield sample.Sample( name=self.NAME, type=self.TYPE, unit=self.UNIT, volume=data, user_id=None, project_id=None, resource_id=CONF.host, timestamp=timeutils.utcnow().isoformat(), resource_metadata=metadata) class InletTemperaturePollster(_Base): # Note(ildikov): The new meter name should be # 
"hardware.ipmi.node.inlet_temperature". As currently there # is no meter deprecation support in the code, we should use the # old name in order to avoid confusion. NAME = "hardware.ipmi.node.temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_inlet_temperature() class OutletTemperaturePollster(_Base): NAME = "hardware.ipmi.node.outlet_temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_outlet_temperature() class PowerPollster(_Base): NAME = "hardware.ipmi.node.power" TYPE = sample.TYPE_GAUGE UNIT = "W" def read_data(self, cache): return self.nodemanager.read_power_all() class AirflowPollster(_Base): NAME = "hardware.ipmi.node.airflow" TYPE = sample.TYPE_GAUGE UNIT = "CFM" def read_data(self, cache): return self.nodemanager.read_airflow() class CUPSIndexPollster(_Base): NAME = "hardware.ipmi.node.cups" TYPE = sample.TYPE_GAUGE UNIT = "CUPS" def read_data(self, cache): return self.nodemanager.read_cups_index() def get_value(self, stats): return node_manager._hex(stats["CUPS_Index"]) class _CUPSUtilPollsterBase(_Base): CACHE_KEY_CUPS = 'CUPS' def read_data(self, cache): i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {}) if not i_cache: i_cache.update(self.nodemanager.read_cups_utilization()) return i_cache class CPUUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.cpu_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["CPU_Utilization"]) class MemUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.mem_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["Mem_Utilization"]) class IOUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.io_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["IO_Utilization"]) ceilometer-6.0.0/ceilometer/ipmi/pollsters/sensor.py0000664000567000056710000001010012701406224023774 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer.ipmi.notifications import ironic as parser from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmi_sensor from ceilometer import sample CONF = cfg.CONF CONF.import_opt('host', 'ceilometer.service') CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', group='ipmi') LOG = log.getLogger(__name__) class InvalidSensorData(ValueError): pass class SensorPollster(plugin_base.PollsterBase): METRIC = None def setup_environment(self): super(SensorPollster, self).setup_environment() self.ipmi = ipmi_sensor.IPMISensor() self.polling_failures = 0 # Do not load this extension if no IPMI support if not self.ipmi.ipmi_support: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' @staticmethod def _get_sensor_types(data, sensor_type): try: return (sensor_type_data for _, sensor_type_data in data[sensor_type].items()) except KeyError: return [] def get_samples(self, manager, cache, resources): # Only one resource for IPMI pollster try: stats = self.ipmi.read_sensor_any(self.METRIC) except ipmiexcept.IPMIException: self.polling_failures += 1 LOG.warning(_( 'Polling %(mtr)s sensor failed for %(cnt)s times!') % ({'mtr': self.METRIC, 'cnt': self.polling_failures})) if (CONF.ipmi.polling_retry >= 0 and self.polling_failures > CONF.ipmi.polling_retry): LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 sensor_type_data = self._get_sensor_types(stats, self.METRIC) for sensor_data in sensor_type_data: # Continue if sensor_data is not parseable. try: sensor_reading = sensor_data['Sensor Reading'] sensor_id = sensor_data['Sensor ID'] except KeyError: continue if not parser.validate_reading(sensor_reading): continue try: volume, unit = parser.parse_reading(sensor_reading) except parser.InvalidSensorData: continue resource_id = '%(host)s-%(sensor-id)s' % { 'host': CONF.host, 'sensor-id': parser.transform_id(sensor_id) } metadata = { 'node': CONF.host } yield sample.Sample( name='hardware.ipmi.%s' % self.METRIC.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=metadata) class TemperatureSensorPollster(SensorPollster): METRIC = 'Temperature' class CurrentSensorPollster(SensorPollster): METRIC = 'Current' class FanSensorPollster(SensorPollster): METRIC = 'Fan' class VoltageSensorPollster(SensorPollster): METRIC = 'Voltage' ceilometer-6.0.0/ceilometer/ipmi/__init__.py0000664000567000056710000000000012701406223022170 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/opts.py0000664000567000056710000001320312701406224020472 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
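# --- Editor's note (illustrative, not part of the original module): the
# list_opts() hook at the end of this module is what oslo-config-generator
# consumes through an "oslo.config.opts" entry point (assumed to be declared
# in setup.cfg as "ceilometer = ceilometer.opts:list_opts"), so that a sample
# configuration file can be produced with, e.g.:
#
#     oslo-config-generator --namespace ceilometer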
import itertools

from keystoneauth1 import loading

import ceilometer.agent.manager
import ceilometer.api
import ceilometer.api.app
import ceilometer.cmd.polling
import ceilometer.collector
import ceilometer.compute.discovery
import ceilometer.compute.notifications
import ceilometer.compute.util
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.inspector
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
import ceilometer.coordination
import ceilometer.dispatcher
import ceilometer.dispatcher.file
import ceilometer.dispatcher.gnocchi
import ceilometer.energy.kwapi
import ceilometer.event.converter
import ceilometer.exchange_control
import ceilometer.hardware.discovery
import ceilometer.image.glance
import ceilometer.ipmi.notifications.ironic
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
import ceilometer.keystone_client
import ceilometer.meter.notifications
import ceilometer.middleware
import ceilometer.network.notifications
import ceilometer.neutron_client
import ceilometer.notification
import ceilometer.nova_client
import ceilometer.objectstore.rgw
import ceilometer.objectstore.swift
import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
import ceilometer.service
import ceilometer.storage
import ceilometer.utils


def list_opts():
    return [
        ('DEFAULT',
         itertools.chain(ceilometer.agent.manager.OPTS,
                         ceilometer.api.app.OPTS,
                         ceilometer.cmd.polling.CLI_OPTS,
                         ceilometer.compute.notifications.OPTS,
                         ceilometer.compute.util.OPTS,
                         ceilometer.compute.virt.inspector.OPTS,
                         ceilometer.compute.virt.libvirt.inspector.OPTS,
                         ceilometer.dispatcher.OPTS,
                         ceilometer.image.glance.OPTS,
                         ceilometer.ipmi.notifications.ironic.OPTS,
                         ceilometer.middleware.OPTS,
                         ceilometer.network.notifications.OPTS,
                         ceilometer.nova_client.OPTS,
                         ceilometer.objectstore.swift.OPTS,
                         ceilometer.pipeline.OPTS,
                         ceilometer.sample.OPTS,
                         ceilometer.service.OPTS,
                         ceilometer.storage.OLD_OPTS,
                         ceilometer.storage.CLI_OPTS,
                         ceilometer.utils.OPTS,)),
        ('api',
         itertools.chain(ceilometer.api.OPTS,
                         ceilometer.api.app.API_OPTS,
                         [ceilometer.service.API_OPT])),
        # deprecated path, new one is 'polling'
        ('central', ceilometer.agent.manager.OPTS),
        ('collector',
         itertools.chain(ceilometer.collector.OPTS,
                         [ceilometer.service.COLL_OPT])),
        ('compute', ceilometer.compute.discovery.OPTS),
        ('coordination', ceilometer.coordination.OPTS),
        ('database', ceilometer.storage.OPTS),
        ('dispatcher_file', ceilometer.dispatcher.file.OPTS),
        ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi.dispatcher_opts),
        ('event', ceilometer.event.converter.OPTS),
        ('exchange_control', ceilometer.exchange_control.EXCHANGE_OPTS),
        ('hardware', ceilometer.hardware.discovery.OPTS),
        ('ipmi',
         itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
                         ceilometer.ipmi.pollsters.OPTS)),
        ('meter', ceilometer.meter.notifications.OPTS),
        ('notification',
         itertools.chain(ceilometer.notification.OPTS,
                         [ceilometer.service.NOTI_OPT])),
        ('polling', ceilometer.agent.manager.OPTS),
        ('publisher', ceilometer.publisher.utils.OPTS),
        ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS),
        ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS),
        # NOTE(sileht): the configuration file contains only the options
        # for the password plugin that handles keystone v2 and v3 API
        # with discovery. But other options are possible.
        # Also, the default loaded plugin is password-ceilometer-legacy for
        # backward compatibility
        ('service_credentials', (
            ceilometer.keystone_client.CLI_OPTS +
            loading.get_auth_common_conf_options() +
            loading.get_auth_plugin_conf_options('password'))),
        ('service_types',
         itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS,
                         ceilometer.image.glance.SERVICE_OPTS,
                         ceilometer.neutron_client.SERVICE_OPTS,
                         ceilometer.nova_client.SERVICE_OPTS,
                         ceilometer.objectstore.rgw.SERVICE_OPTS,
                         ceilometer.objectstore.swift.SERVICE_OPTS,)),
        ('storage', ceilometer.dispatcher.STORAGE_OPTS),
        ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS),
        ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS),
    ]
ceilometer-6.0.0/ceilometer/dispatcher/0000775000567000056710000000000012701406364021267 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/dispatcher/gnocchi.py0000664000567000056710000004056712701406223023261 0ustar jenkinsjenkins00000000000000#
# Copyright 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import defaultdict
from hashlib import md5
import itertools
import operator
import threading
import uuid

from gnocchiclient import client
from gnocchiclient import exceptions as gnocchi_exc
from keystoneauth1 import session as ka_session
from oslo_config import cfg
from oslo_log import log
import requests
import retrying
import six
from stevedore import extension

from ceilometer import declarative
from ceilometer import dispatcher
from ceilometer.i18n import _, _LE, _LW
from ceilometer import keystone_client
from ceilometer import utils

NAME_ENCODED = __name__.encode('utf-8')
CACHE_NAMESPACE = uuid.UUID(bytes=md5(NAME_ENCODED).digest())
LOG = log.getLogger(__name__)

dispatcher_opts = [
    cfg.BoolOpt('filter_service_activity',
                default=True,
                help='Filter out samples generated by Gnocchi '
                     'service activity'),
    cfg.StrOpt('filter_project',
               default='gnocchi',
               help='Gnocchi project used to filter out samples '
                    'generated by Gnocchi service activity'),
    cfg.StrOpt('url',
               deprecated_for_removal=True,
               help='URL to Gnocchi.
default: autodetection'), cfg.StrOpt('archive_policy', help='The archive policy to use when the dispatcher ' 'create a new metric.'), cfg.StrOpt('resources_definition_file', default='gnocchi_resources.yaml', help=_('The Yaml file that defines mapping between samples ' 'and gnocchi resources/metrics')), ] cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi") def cache_key_mangler(key): """Construct an opaque cache key.""" if six.PY2: key = key.encode('utf-8') return uuid.uuid5(CACHE_NAMESPACE, key).hex def log_and_ignore_unexpected_workflow_error(func): def log_and_ignore(self, *args, **kwargs): try: func(self, *args, **kwargs) except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: LOG.error(six.text_type(e), exc_info=True) return log_and_ignore class ResourcesDefinitionException(Exception): def __init__(self, message, definition_cfg): msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message) super(ResourcesDefinitionException, self).__init__(msg) class ResourcesDefinition(object): MANDATORY_FIELDS = {'resource_type': six.string_types, 'metrics': list} def __init__(self, definition_cfg, default_archive_policy, plugin_manager): self._default_archive_policy = default_archive_policy self.cfg = definition_cfg for field, field_type in self.MANDATORY_FIELDS.items(): if field not in self.cfg: raise declarative.DefinitionException( _LE("Required field %s not specified") % field, self.cfg) if not isinstance(self.cfg[field], field_type): raise declarative.DefinitionException( _LE("Required field %(field)s should be a %(type)s") % {'field': field, 'type': field_type}, self.cfg) self._attributes = {} for name, attr_cfg in self.cfg.get('attributes', {}).items(): self._attributes[name] = declarative.Definition(name, attr_cfg, plugin_manager) self.metrics = {} for t in self.cfg['metrics']: archive_policy = self.cfg.get('archive_policy', self._default_archive_policy) if archive_policy is None: self.metrics[t] = {} else: self.metrics[t] = dict(archive_policy_name=archive_policy) def match(self, metric_name): for t in self.cfg['metrics']: if utils.match(metric_name, t): return True return False def attributes(self, sample): attrs = {} for name, definition in self._attributes.items(): value = definition.parse(sample) if value is not None: attrs[name] = value return attrs def get_gnocchiclient(conf): requests_session = requests.session() for scheme in requests_session.adapters.keys(): requests_session.mount(scheme, ka_session.TCPKeepAliveAdapter( pool_block=True)) session = keystone_client.get_session(requests_session=requests_session) return client.Client('1', session, interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.dispatcher_gnocchi.url) class LockedDefaultDict(defaultdict): """defaultdict with lock to handle threading Dictionary only deletes if nothing is accessing dict and nothing is holding lock to be deleted. If both cases are not true, it will skip delete. 
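
    A usage sketch (editor addition, mirroring how the dispatcher below uses
    it for per-resource locking):

        locks = LockedDefaultDict(threading.Lock)
        with locks['resource-1']:   # the per-key lock is created on demand
            ...                     # critical section for this resource
        locks.pop('resource-1')     # silently skipped if still held elsewhere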
""" def __init__(self, *args, **kwargs): self.lock = threading.Lock() super(LockedDefaultDict, self).__init__(*args, **kwargs) def __getitem__(self, key): with self.lock: return super(LockedDefaultDict, self).__getitem__(key) def pop(self, key, *args): with self.lock: key_lock = super(LockedDefaultDict, self).__getitem__(key) if key_lock.acquire(False): try: super(LockedDefaultDict, self).pop(key, *args) finally: key_lock.release() class GnocchiDispatcher(dispatcher.MeterDispatcherBase): """Dispatcher class for recording metering data into database. The dispatcher class records each meter into the gnocchi service configured in ceilometer configuration file. An example configuration may look like the following: [dispatcher_gnocchi] url = http://localhost:8041 archive_policy = low To enable this dispatcher, the following section needs to be present in ceilometer.conf file [DEFAULT] meter_dispatchers = gnocchi """ def __init__(self, conf): super(GnocchiDispatcher, self).__init__(conf) self.conf = conf self.filter_service_activity = ( conf.dispatcher_gnocchi.filter_service_activity) self._ks_client = keystone_client.get_client() self.resources_definition = self._load_resources_definitions(conf) self.cache = None try: import oslo_cache oslo_cache.configure(self.conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. if 'null' not in self.conf.cache.backend: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( self.conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass except oslo_cache.exception.ConfigurationError as exc: LOG.warning(_LW('unable to configure oslo_cache: %s') % exc) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) self._gnocchi = get_gnocchiclient(conf) # Convert retry_interval secs to msecs for retry decorator retries = conf.storage.max_retries @retrying.retry(wait_fixed=conf.storage.retry_interval * 1000, stop_max_attempt_number=(retries if retries >= 0 else None)) def _get_connection(): self._gnocchi.capabilities.list() try: _get_connection() except Exception: LOG.error(_LE('Failed to connect to Gnocchi.')) raise @classmethod def _load_resources_definitions(cls, conf): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( {}, conf.dispatcher_gnocchi.resources_definition_file) resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, conf.dispatcher_gnocchi.archive_policy, plugin_manager)) except Exception as exc: LOG.error(_LE("Failed to load resource due to error %s") % exc) return resource_defs @property def gnocchi_project_id(self): if self._gnocchi_project_id is not None: return self._gnocchi_project_id with self._gnocchi_project_id_lock: if self._gnocchi_project_id is None: try: project = self._ks_client.projects.find( name=self.conf.dispatcher_gnocchi.filter_project) except Exception: LOG.exception('fail to retrieve user of Gnocchi service') raise self._gnocchi_project_id = project.id LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) return self._gnocchi_project_id def _is_swift_account_sample(self, sample): return bool([rd for rd in self.resources_definition if rd.cfg['resource_type'] == 'swift_account' and 
rd.match(sample['counter_name'])]) def _is_gnocchi_activity(self, sample): return (self.filter_service_activity and ( # avoid anything from the user used by gnocchi sample['project_id'] == self.gnocchi_project_id or # avoid anything in the swift account used by gnocchi (sample['resource_id'] == self.gnocchi_project_id and self._is_swift_account_sample(sample)) )) def _get_resource_definition(self, metric_name): for rd in self.resources_definition: if rd.match(metric_name): return rd def record_metering_data(self, data): # We may have receive only one counter on the wire if not isinstance(data, list): data = [data] # NOTE(sileht): skip sample generated by gnocchi itself data = [s for s in data if not self._is_gnocchi_activity(s)] # FIXME(sileht): This method bulk the processing of samples # grouped by resource_id and metric_name but this is not # efficient yet because the data received here doesn't often # contains a lot of different kind of samples # So perhaps the next step will be to pool the received data from # message bus. data.sort(key=lambda s: (s['resource_id'], s['counter_name'])) resource_grouped_samples = itertools.groupby( data, key=operator.itemgetter('resource_id')) for resource_id, samples_of_resource in resource_grouped_samples: metric_grouped_samples = itertools.groupby( list(samples_of_resource), key=operator.itemgetter('counter_name')) self._process_resource(resource_id, metric_grouped_samples) @log_and_ignore_unexpected_workflow_error def _process_resource(self, resource_id, metric_grouped_samples): resource_extra = {} for metric_name, samples in metric_grouped_samples: samples = list(samples) rd = self._get_resource_definition(metric_name) if rd is None: LOG.warning("metric %s is not handled by gnocchi" % metric_name) continue if rd.cfg.get("ignore"): continue resource_type = rd.cfg['resource_type'] resource = { "id": resource_id, "user_id": samples[0]['user_id'], "project_id": samples[0]['project_id'], "metrics": rd.metrics, } measures = [] for sample in samples: resource_extra.update(rd.attributes(sample)) measures.append({'timestamp': sample['timestamp'], 'value': sample['counter_volume']}) resource.update(resource_extra) retry = True try: self._gnocchi.metric.add_measures(metric_name, measures, resource_id) except gnocchi_exc.ResourceNotFound: self._if_not_cached("create", resource_type, resource, self._create_resource) except gnocchi_exc.MetricNotFound: metric = {'resource_id': resource['id'], 'name': metric_name} metric.update(rd.metrics[metric_name]) try: self._gnocchi.metric.create(metric) except gnocchi_exc.NamedMetricAlreadyExists: # NOTE(sileht): metric created in the meantime pass else: retry = False if retry: self._gnocchi.metric.add_measures(metric_name, measures, resource_id) LOG.debug("Measure posted on metric %s of resource %s", metric_name, resource_id) if resource_extra: self._if_not_cached("update", resource_type, resource, self._update_resource, resource_extra) def _create_resource(self, resource_type, resource): try: self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass def _update_resource(self, resource_type, resource, resource_extra): self._gnocchi.resource.update(resource_type, resource["id"], resource_extra) LOG.debug('Resource %s updated', resource["id"]) def _if_not_cached(self, operation, resource_type, resource, method, *args, **kwargs): if self.cache: cache_key = resource['id'] attribute_hash = 
self._check_resource_cache(cache_key, resource) if attribute_hash: with self._gnocchi_resource_lock[cache_key]: # NOTE(luogangyi): there is a possibility that the # resource was already built in cache by another # ceilometer-collector when we get the lock here. attribute_hash = self._check_resource_cache(cache_key, resource) if attribute_hash: method(resource_type, resource, *args, **kwargs) self.cache.set(cache_key, attribute_hash) else: LOG.debug('resource cache recheck hit for ' '%s %s', operation, cache_key) self._gnocchi_resource_lock.pop(cache_key, None) else: LOG.debug('Resource cache hit for %s %s', operation, cache_key) else: method(resource_type, resource, *args, **kwargs) def _check_resource_cache(self, key, resource_data): cached_hash = self.cache.get(key) attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics", resource_data.items()))) if not cached_hash or cached_hash != attribute_hash: return attribute_hash else: return None ceilometer-6.0.0/ceilometer/dispatcher/__init__.py0000664000567000056710000000557312701406223023404 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import six from stevedore import named from ceilometer.i18n import _LW LOG = log.getLogger(__name__) OPTS = [ cfg.MultiStrOpt('meter_dispatchers', deprecated_name='dispatcher', default=['database'], help='Dispatchers to process metering data.'), cfg.MultiStrOpt('event_dispatchers', default=['database'], deprecated_name='dispatcher', help='Dispatchers to process event data.'), ] cfg.CONF.register_opts(OPTS) STORAGE_OPTS = [ cfg.IntOpt('max_retries', default=10, deprecated_group='database', help='Maximum number of connection retries during startup. ' 'Set to -1 to specify an infinite retry count.'), cfg.IntOpt('retry_interval', default=10, deprecated_group='database', help='Interval (in seconds) between retries of connection.'), ] cfg.CONF.register_opts(STORAGE_OPTS, group='storage') def _load_dispatcher_manager(dispatcher_type): namespace = 'ceilometer.dispatcher.%s' % dispatcher_type conf_name = '%s_dispatchers' % dispatcher_type LOG.debug('loading dispatchers from %s', namespace) # set propagate_map_exceptions to True to enable stevedore # to propagate exceptions. 
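    # For illustration (hypothetical deployment values): with
    # meter_dispatchers = database,file in ceilometer.conf, the names
    # 'database' and 'file' are looked up under the
    # 'ceilometer.dispatcher.meter' entry-point namespace and each
    # matching dispatcher class is instantiated with the global
    # cfg.CONF object.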
dispatcher_manager = named.NamedExtensionManager( namespace=namespace, names=getattr(cfg.CONF, conf_name), invoke_on_load=True, invoke_args=[cfg.CONF], propagate_map_exceptions=True) if not list(dispatcher_manager): LOG.warning(_LW('Failed to load any dispatchers for %s'), namespace) return dispatcher_manager def load_dispatcher_manager(): return (_load_dispatcher_manager('meter'), _load_dispatcher_manager('event')) class Base(object): def __init__(self, conf): self.conf = conf @six.add_metaclass(abc.ABCMeta) class MeterDispatcherBase(Base): @abc.abstractmethod def record_metering_data(self, data): """Recording metering data interface.""" @six.add_metaclass(abc.ABCMeta) class EventDispatcherBase(Base): @abc.abstractmethod def record_events(self, events): """Recording events interface.""" ceilometer-6.0.0/ceilometer/dispatcher/file.py0000664000567000056710000000541612701406223022560 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers from oslo_config import cfg from ceilometer import dispatcher OPTS = [ cfg.StrOpt('file_path', help='Name and the location of the file to record ' 'meters.'), cfg.IntOpt('max_bytes', default=0, help='The max size of the file.'), cfg.IntOpt('backup_count', default=0, help='The max number of the files to keep.'), ] cfg.CONF.register_opts(OPTS, group="dispatcher_file") class FileDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data to a file. The dispatcher class which logs each meter and/or event into a file configured in ceilometer configuration file. An example configuration may look like the following: [dispatcher_file] file_path = /tmp/meters To enable this dispatcher, the following section needs to be present in ceilometer.conf file [DEFAULT] meter_dispatchers = file event_dispatchers = file """ def __init__(self, conf): super(FileDispatcher, self).__init__(conf) self.log = None # if the directory and path are configured, then log to the file if self.conf.dispatcher_file.file_path: dispatcher_logger = logging.Logger('dispatcher.file') dispatcher_logger.setLevel(logging.INFO) # create rotating file handler which logs meters rfh = logging.handlers.RotatingFileHandler( self.conf.dispatcher_file.file_path, maxBytes=self.conf.dispatcher_file.max_bytes, backupCount=self.conf.dispatcher_file.backup_count, encoding='utf8') rfh.setLevel(logging.INFO) # Only wanted the meters to be saved in the file, not the # project root logger. 
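            # Disabling propagation below keeps these records out of the
            # root logger, so meters and events land only in the rotating
            # file handler configured above instead of being duplicated
            # in the service log.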
dispatcher_logger.propagate = False dispatcher_logger.addHandler(rfh) self.log = dispatcher_logger def record_metering_data(self, data): if self.log: self.log.info(data) def record_events(self, events): if self.log: self.log.info(events) ceilometer-6.0.0/ceilometer/dispatcher/http.py0000664000567000056710000001215612701406223022617 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_config import cfg from oslo_log import log import requests from ceilometer import dispatcher from ceilometer.i18n import _, _LE, _LW from ceilometer.publisher import utils as publisher_utils LOG = log.getLogger(__name__) http_dispatcher_opts = [ cfg.StrOpt('target', default='', help='The target where the http request will be sent. ' 'If this is not set, no data will be posted. For ' 'example: target = http://hostname:1234/path'), cfg.StrOpt('event_target', help='The target for event data where the http request ' 'will be sent to. If this is not set, it will default ' 'to same as Sample target.'), cfg.IntOpt('timeout', default=5, help='The max time in seconds to wait for a request to ' 'timeout.'), ] cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http") class HttpDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for posting metering/event data into a http target. To enable this dispatcher, the following option needs to be present in ceilometer.conf file:: [DEFAULT] meter_dispatchers = http event_dispatchers = http Dispatcher specific options can be added as follows:: [dispatcher_http] target = www.example.com event_target = www.example.com timeout = 2 """ def __init__(self, conf): super(HttpDispatcher, self).__init__(conf) self.headers = {'Content-type': 'application/json'} self.timeout = self.conf.dispatcher_http.timeout self.target = self.conf.dispatcher_http.target self.event_target = (self.conf.dispatcher_http.event_target or self.target) def record_metering_data(self, data): if self.target == '': # if the target was not set, do not do anything LOG.error(_('Dispatcher target was not set, no meter will ' 'be posted. 
Set the target in the ceilometer.conf ' 'file')) return # We may have received only one counter on the wire if not isinstance(data, list): data = [data] for meter in data: LOG.debug( 'metering data %(counter_name)s ' 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', {'counter_name': meter['counter_name'], 'resource_id': meter['resource_id'], 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), 'counter_volume': meter['counter_volume']}) if publisher_utils.verify_signature( meter, self.conf.publisher.telemetry_secret): try: # Every meter should be posted to the target res = requests.post(self.target, data=json.dumps(meter), headers=self.headers, timeout=self.timeout) LOG.debug('Message posting finished with status code ' '%d.', res.status_code) except Exception as err: LOG.exception(_('Failed to record metering data: %s'), err) else: LOG.warning(_( 'message signature invalid, discarding message: %r'), meter) def record_events(self, events): if not isinstance(events, list): events = [events] for event in events: if publisher_utils.verify_signature( event, self.conf.publisher.telemetry_secret): res = None try: res = requests.post(self.event_target, data=event, headers=self.headers, timeout=self.timeout) res.raise_for_status() except Exception: error_code = res.status_code if res else 'unknown' LOG.exception(_LE('Status Code: %(code)s. Failed to ' 'dispatch event: %(event)s'), {'code': error_code, 'event': event}) else: LOG.warning(_LW( 'event signature invalid, discarding event: %s'), event) ceilometer-6.0.0/ceilometer/dispatcher/database.py0000664000567000056710000001215112701406223023377 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils from ceilometer import dispatcher from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LW from ceilometer.publisher import utils as publisher_utils from ceilometer import storage LOG = log.getLogger(__name__) class DatabaseDispatcher(dispatcher.MeterDispatcherBase, dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data into database. The dispatcher class records each meter into the database configured in the ceilometer configuration file. 
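    Metering and event data are written through two separately resolved
    connections (purposes 'metering' and 'event'), so each may target a
    different store. A hypothetical example using the per-purpose options
    from ceilometer.storage::

        [database]
        metering_connection = mysql+pymysql://ceilometer:secret@localhost/ceilometer
        event_connection = mongodb://localhost:27017/ceilometer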
To enable this dispatcher, the following section needs to be present in ceilometer.conf file [DEFAULT] meter_dispatchers = database event_dispatchers = database """ def __init__(self, conf): super(DatabaseDispatcher, self).__init__(conf) self._meter_conn = self._get_db_conn('metering', True) self._event_conn = self._get_db_conn('event', True) def _get_db_conn(self, purpose, ignore_exception=False): try: return storage.get_connection_from_config(self.conf, purpose) except Exception as err: params = {"purpose": purpose, "err": err} LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " "re-try later: %(err)s") % params) if not ignore_exception: raise @property def meter_conn(self): if not self._meter_conn: self._meter_conn = self._get_db_conn('metering') return self._meter_conn @property def event_conn(self): if not self._event_conn: self._event_conn = self._get_db_conn('event') return self._event_conn def record_metering_data(self, data): # We may have receive only one counter on the wire if not isinstance(data, list): data = [data] for meter in data: LOG.debug( 'metering data %(counter_name)s ' 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', {'counter_name': meter['counter_name'], 'resource_id': meter['resource_id'], 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), 'counter_volume': meter['counter_volume']}) if publisher_utils.verify_signature( meter, self.conf.publisher.telemetry_secret): try: # Convert the timestamp to a datetime instance. # Storage engines are responsible for converting # that value to something they can store. if meter.get('timestamp'): ts = timeutils.parse_isotime(meter['timestamp']) meter['timestamp'] = timeutils.normalize_time(ts) self.meter_conn.record_metering_data(meter) except Exception as err: LOG.exception(_LE('Failed to record metering data: %s'), err) # raise the exception to propagate it up in the chain. raise else: LOG.warning(_LW( 'message signature invalid, discarding message: %r'), meter) def record_events(self, events): if not isinstance(events, list): events = [events] event_list = [] for ev in events: if publisher_utils.verify_signature( ev, self.conf.publisher.telemetry_secret): try: event_list.append( models.Event( message_id=ev['message_id'], event_type=ev['event_type'], generated=timeutils.normalize_time( timeutils.parse_isotime(ev['generated'])), traits=[models.Trait( name, dtype, models.Trait.convert_value(dtype, value)) for name, dtype, value in ev['traits']], raw=ev.get('raw', {})) ) except Exception: LOG.exception(_LE("Error processing event and it will be " "dropped: %s"), ev) else: LOG.warning(_LW( 'event signature invalid, discarding event: %s'), ev) self.event_conn.record_events(event_list) ceilometer-6.0.0/ceilometer/utils.py0000664000567000056710000002202612701406224020650 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import bisect import calendar import copy import datetime import decimal import fnmatch import hashlib import re import struct import sys from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import units import six OPTS = [ cfg.StrOpt('rootwrap_config', default="/etc/ceilometer/rootwrap.conf", help='Path to the rootwrap configuration file to' 'use for running commands as root'), ] CONF = cfg.CONF CONF.register_opts(OPTS) EPOCH_TIME = datetime.datetime(1970, 1, 1) def _get_root_helper(): return 'sudo ceilometer-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.execute(*cmd, **kwargs) def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(six.iteritems(input)): temp[decode_unicode(key)] = decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. return [decode_unicode(element) for element in input] elif six.PY2 and isinstance(input, six.text_type): return input.encode('utf-8') elif six.PY3 and isinstance(input, six.binary_type): return input.decode('utf-8') else: return input def recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(six.iteritems(d)): if isinstance(value, dict): for subname, subvalue in recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, decode_unicode(value) else: yield name, value def restore_nesting(d, separator=':'): """Unwinds a flattened dict to restore nesting.""" d = copy.copy(d) if any([separator in k for k in d.keys()]) else d for k, v in d.copy().items(): if separator in k: top, rem = k.split(separator, 1) nest = d[top] if isinstance(d.get(top), dict) else {} nest[rem] = v d[top] = restore_nesting(nest, separator) del d[k] return d def dt_to_decimal(utc): """Datetime to Decimal. Some databases don't store microseconds in datetime so we always store as Decimal unixtime. 
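    A round-trip sketch with decimal_to_dt() below::

        >>> dt_to_decimal(datetime.datetime(1970, 1, 1, 0, 0, 1, 500000))
        Decimal('1.5')
        >>> decimal_to_dt(decimal.Decimal('1.5'))
        datetime.datetime(1970, 1, 1, 0, 0, 1, 500000)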
""" if utc is None: return None decimal.getcontext().prec = 30 return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + (decimal.Decimal(str(utc.microsecond)) / decimal.Decimal("1000000.0"))) def decimal_to_dt(dec): """Return a datetime from Decimal unixtime format.""" if dec is None: return None integer = int(dec) micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(units.M) daittyme = datetime.datetime.utcfromtimestamp(integer) return daittyme.replace(microsecond=int(round(micro))) def sanitize_timestamp(timestamp): """Return a naive utc datetime object.""" if not timestamp: return timestamp if not isinstance(timestamp, datetime.datetime): timestamp = timeutils.parse_isotime(timestamp) return timeutils.normalize_time(timestamp) def stringify_timestamps(data): """Stringify any datetimes in given dict.""" isa_timestamp = lambda v: isinstance(v, datetime.datetime) return dict((k, v.isoformat() if isa_timestamp(v) else v) for (k, v) in six.iteritems(data)) def dict_to_keyval(value, key_base=None): """Expand a given dict to its corresponding key-value pairs. Generated keys are fully qualified, delimited using dot notation. ie. key = 'key.child_key.grandchild_key[0]' """ val_iter, key_func = None, None if isinstance(value, dict): val_iter = six.iteritems(value) key_func = lambda k: key_base + '.' + k if key_base else k elif isinstance(value, (tuple, list)): val_iter = enumerate(value) key_func = lambda k: key_base + '[%d]' % k if val_iter: for k, v in val_iter: key_gen = key_func(k) if isinstance(v, dict) or isinstance(v, (tuple, list)): for key_gen, v in dict_to_keyval(v, key_gen): yield key_gen, v else: yield key_gen, v def lowercase_keys(mapping): """Converts the values of the keys in mapping to lowercase.""" items = mapping.items() for key, value in items: del mapping[key] mapping[key.lower()] = value def lowercase_values(mapping): """Converts the values in the mapping dict to lowercase.""" items = mapping.items() for key, value in items: mapping[key] = value.lower() def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. Updates occur without replacing entire sub-dicts. 
""" dict_to_update = copy.deepcopy(original_dict) for key, value in six.iteritems(updates): if isinstance(value, dict): sub_dict = update_nested(dict_to_update.get(key, {}), value) dict_to_update[key] = sub_dict else: dict_to_update[key] = updates[key] return dict_to_update def uniq(dupes, attrs): """Exclude elements of dupes with a duplicated set of attribute values.""" key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs]) keys = [] deduped = [] for d in dupes: if key(d) not in keys: deduped.append(d) keys.append(key(d)) return deduped def hash_of_set(s): return str(hash(frozenset(s))) class HashRing(object): def __init__(self, nodes, replicas=100): self._ring = dict() self._sorted_keys = [] for node in nodes: for r in six.moves.range(replicas): hashed_key = self._hash('%s-%s' % (node, r)) self._ring[hashed_key] = node self._sorted_keys.append(hashed_key) self._sorted_keys.sort() @staticmethod def _hash(key): return struct.unpack_from('>I', hashlib.md5(str(key).encode()).digest())[0] def _get_position_on_ring(self, key): hashed_key = self._hash(key) position = bisect.bisect(self._sorted_keys, hashed_key) return position if position < len(self._sorted_keys) else 0 def get_node(self, key): if not self._ring: return None pos = self._get_position_on_ring(key) return self._ring[self._sorted_keys[pos]] def kill_listeners(listeners): # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), # which stops new messages, and wait(), which processes remaining # messages and closes connection for listener in listeners: listener.stop() listener.wait() if sys.version_info > (2, 7, 9): match = fnmatch.fnmatch else: _MATCH_CACHE = {} _MATCH_CACHE_MAX = 100 def match(string, pattern): """Thread safe fnmatch re-implementation. Standard library fnmatch in Python versions <= 2.7.9 has thread safe issue, this helper function is created for such case. see: https://bugs.python.org/issue23191 """ string = string.lower() pattern = pattern.lower() cached_pattern = _MATCH_CACHE.get(pattern) if cached_pattern is None: translated_pattern = fnmatch.translate(pattern) cached_pattern = re.compile(translated_pattern) if len(_MATCH_CACHE) >= _MATCH_CACHE_MAX: _MATCH_CACHE.clear() _MATCH_CACHE[pattern] = cached_pattern return cached_pattern.match(string) is not None ceilometer-6.0.0/ceilometer/event/0000775000567000056710000000000012701406364020262 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/event/converter.py0000664000567000056710000003040412701406223022636 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from debtcollector import moves from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer import declarative from ceilometer.event.storage import models from ceilometer.i18n import _ from ceilometer import utils OPTS = [ cfg.StrOpt('definitions_cfg_file', default="event_definitions.yaml", help="Configuration file for event definitions." 
), cfg.BoolOpt('drop_unmatched_notifications', default=False, help='Drop notifications if no event definition matches. ' '(Otherwise, we convert them with just the default traits)'), cfg.MultiStrOpt('store_raw', default=[], help='Store the raw notification for select priority ' 'levels (info and/or error). By default, raw details are ' 'not captured.') ] cfg.CONF.register_opts(OPTS, group='event') LOG = log.getLogger(__name__) EventDefinitionException = moves.moved_class(declarative.DefinitionException, 'EventDefinitionException', __name__, version=6.0, removal_version="?") class TraitDefinition(declarative.Definition): def __init__(self, name, trait_cfg, plugin_manager): super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager) type_name = (trait_cfg.get('type', 'text') if isinstance(trait_cfg, dict) else 'text') self.trait_type = models.Trait.get_type_by_name(type_name) if self.trait_type is None: raise declarative.DefinitionException( _("Invalid trait type '%(type)s' for trait %(trait)s") % dict(type=type_name, trait=name), self.cfg) def to_trait(self, notification_body): value = self.parse(notification_body) if value is None: return None # NOTE(mdragon): some openstack projects (mostly Nova) emit '' # for null fields for things like dates. if self.trait_type != models.Trait.TEXT_TYPE and value == '': return None value = models.Trait.convert_value(self.trait_type, value) return models.Trait(self.name, self.trait_type, value) class EventDefinition(object): DEFAULT_TRAITS = dict( service=dict(type='text', fields='publisher_id'), request_id=dict(type='text', fields='_context_request_id'), project_id=dict(type='text', fields=['payload.tenant_id', '_context_tenant']), user_id=dict(type='text', fields=['payload.user_id', '_context_user_id']), # TODO(dikonoor):tenant_id is old terminology and should # be deprecated tenant_id=dict(type='text', fields=['payload.tenant_id', '_context_tenant']), ) def __init__(self, definition_cfg, trait_plugin_mgr): self._included_types = [] self._excluded_types = [] self.traits = dict() self.cfg = definition_cfg self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw] try: event_type = definition_cfg['event_type'] traits = definition_cfg['traits'] except KeyError as err: raise declarative.DefinitionException( _("Required field %s not specified") % err.args[0], self.cfg) if isinstance(event_type, six.string_types): event_type = [event_type] for t in event_type: if t.startswith('!'): self._excluded_types.append(t[1:]) else: self._included_types.append(t) if self._excluded_types and not self._included_types: self._included_types.append('*') for trait_name in self.DEFAULT_TRAITS: self.traits[trait_name] = TraitDefinition( trait_name, self.DEFAULT_TRAITS[trait_name], trait_plugin_mgr) for trait_name in traits: self.traits[trait_name] = TraitDefinition( trait_name, traits[trait_name], trait_plugin_mgr) def included_type(self, event_type): for t in self._included_types: if utils.match(event_type, t): return True return False def excluded_type(self, event_type): for t in self._excluded_types: if utils.match(event_type, t): return True return False def match_type(self, event_type): return (self.included_type(event_type) and not self.excluded_type(event_type)) @property def is_catchall(self): return '*' in self._included_types and not self._excluded_types @staticmethod def _extract_when(body): """Extract the generated datetime from the notification.""" # NOTE: I am keeping the logic the same as it was in the collector, # However, *ALL* 
notifications should have a 'timestamp' field, it's # part of the notification envelope spec. If this was put here because # some openstack project is generating notifications without a # timestamp, then that needs to be filed as a bug with the offending # project (mdragon) when = body.get('timestamp', body.get('_context_timestamp')) if when: return timeutils.normalize_time(timeutils.parse_isotime(when)) return timeutils.utcnow() def to_event(self, notification_body): event_type = notification_body['event_type'] message_id = notification_body['message_id'] when = self._extract_when(notification_body) traits = (self.traits[t].to_trait(notification_body) for t in self.traits) # Only accept non-None value traits ... traits = [trait for trait in traits if trait is not None] raw = (notification_body if notification_body.get('priority') in self.raw_levels else {}) event = models.Event(message_id, event_type, when, traits, raw) return event class NotificationEventsConverter(object): """Notification Event Converter The NotificationEventsConverter handles the conversion of Notifications from openstack systems into Ceilometer Events. The conversion is handled according to event definitions in a config file. The config is a list of event definitions. Order is significant, a notification will be processed according to the LAST definition that matches it's event_type. (We use the last matching definition because that allows you to use YAML merge syntax in the definitions file.) Each definition is a dictionary with the following keys (all are required): - event_type: this is a list of notification event_types this definition will handle. These can be wildcarded with unix shell glob (not regex!) wildcards. An exclusion listing (starting with a '!') will exclude any types listed from matching. If ONLY exclusions are listed, the definition will match anything not matching the exclusions. This item can also be a string, which will be taken as equivalent to 1 item list. Examples: * ['compute.instance.exists'] will only match compute.instance.exists notifications * "compute.instance.exists" Same as above. * ["image.create", "image.delete"] will match image.create and image.delete, but not anything else. * "compute.instance.*" will match compute.instance.create.start but not image.upload * ['*.start','*.end', '!scheduler.*'] will match compute.instance.create.start, and image.delete.end, but NOT compute.instance.exists or scheduler.run_instance.start * '!image.*' matches any notification except image notifications. * ['*', '!image.*'] same as above. - traits: (dict) The keys are trait names, the values are the trait definitions. Each trait definition is a dictionary with the following keys: - type (optional): The data type for this trait. (as a string) Valid options are: 'text', 'int', 'float' and 'datetime', defaults to 'text' if not specified. - fields: a path specification for the field(s) in the notification you wish to extract. The paths can be specified with a dot syntax (e.g. 'payload.host') or dictionary syntax (e.g. 'payload[host]') is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so:: "payload.image_meta.'org.openstack__1__architecture'" The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. 
(see: https://github.com/kennknowles/python-jsonpath-rw for more info) Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null (i.e. is not None) values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish) This configuration value is normally a string, for convenience, it can be specified as a list of specifications, which will be OR'ed together (a union query in jsonpath terms) - plugin (optional): (dictionary) with the following keys: - name: (string) name of a plugin to load - parameters: (optional) Dictionary of keyword args to pass to the plugin on initialization. See documentation on each plugin to see what arguments it accepts. For convenience, this value can also be specified as a string, which is interpreted as a plugin name, which will be loaded with no parameters. """ def __init__(self, events_config, trait_plugin_mgr, add_catchall=True): self.definitions = [ EventDefinition(event_def, trait_plugin_mgr) for event_def in reversed(events_config)] if add_catchall and not any(d.is_catchall for d in self.definitions): event_def = dict(event_type='*', traits={}) self.definitions.append(EventDefinition(event_def, trait_plugin_mgr)) def to_event(self, notification_body): event_type = notification_body['event_type'] message_id = notification_body['message_id'] edef = None for d in self.definitions: if d.match_type(event_type): edef = d break if edef is None: msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') % dict(type=event_type, msgid=message_id)) if cfg.CONF.event.drop_unmatched_notifications: LOG.debug(msg) else: # If drop_unmatched_notifications is False, this should # never happen. (mdragon) LOG.error(msg) return None return edef.to_event(notification_body) def setup_events(trait_plugin_mgr): """Setup the event definitions from yaml config file.""" return NotificationEventsConverter( declarative.load_definitions([], cfg.CONF.event.definitions_cfg_file), trait_plugin_mgr, add_catchall=not cfg.CONF.event.drop_unmatched_notifications) ceilometer-6.0.0/ceilometer/event/__init__.py0000664000567000056710000000000012701406223022353 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/event/trait_plugins.py0000664000567000056710000002061412701406223023515 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from debtcollector import moves from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _LW LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class TraitPluginBase(object): """Base class for plugins. It converts notification fields to Trait values. """ support_return_all_values = False """If True, an exception will be raised if the user expect the plugin to return one trait per match_list, but the plugin doesn't allow/support that. """ def __init__(self, **kw): """Setup the trait plugin. 
For each Trait definition a plugin is used on in a conversion definition, a new instance of the plugin will be created, and initialized with the parameters (if any) specified in the config file. :param kw: the parameters specified in the event definitions file. """ super(TraitPluginBase, self).__init__() @moves.moved_method('trait_values', version=6.0, removal_version="?") def trait_value(self, match_list): pass def trait_values(self, match_list): """Convert a set of fields to one or multiple Trait values. This method is called each time a trait is attempted to be extracted from a notification. It will be called *even if* no matching fields are found in the notification (in that case, the match_list will be empty). If this method returns None, the trait *will not* be added to the event. Any other value returned by this method will be used as the value for the trait. Values returned will be coerced to the appropriate type for the trait. :param match_list: A list (may be empty if no matches) of *tuples*. Each tuple is (field_path, value) where field_path is the jsonpath for that specific field. Example:: trait's fields definition: ['payload.foobar', 'payload.baz', 'payload.thing.*'] notification body: { 'message_id': '12345', 'publisher': 'someservice.host', 'payload': { 'foobar': 'test', 'thing': { 'bar': 12, 'boing': 13, } } } match_list will be: [('payload.foobar','test'), ('payload.thing.bar',12), ('payload.thing.boing',13)] Here is a plugin that emulates the default (no plugin) behavior: .. code-block:: python class DefaultPlugin(TraitPluginBase): "Plugin that returns the first field value." def __init__(self, **kw): super(DefaultPlugin, self).__init__() def trait_value(self, match_list): if not match_list: return None return [ match[1] for match in match_list] """ # For backwards compatibility for the renamed method. return [self.trait_value(match_list)] class SplitterTraitPlugin(TraitPluginBase): """Plugin that splits a piece off of a string value.""" support_return_all_values = True def __init__(self, separator=".", segment=0, max_split=None, **kw): """Setup how do split the field. :param separator: String to split on. default "." :param segment: Which segment to return. (int) default 0 :param max_split: Limit number of splits. Default: None (no limit) """ LOG.warning(_LW('split plugin is deprecated, ' 'add ".`split(%(sep)s, %(segment)d, ' '%(max_split)d)`" to your jsonpath instead') % dict(sep=separator, segment=segment, max_split=(-1 if max_split is None else max_split))) self.separator = separator self.segment = segment self.max_split = max_split super(SplitterTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): return [self._trait_value(match) for match in match_list] def _trait_value(self, match): value = six.text_type(match[1]) if self.max_split is not None: values = value.split(self.separator, self.max_split) else: values = value.split(self.separator) try: return values[self.segment] except IndexError: return None class BitfieldTraitPlugin(TraitPluginBase): """Plugin to set flags on a bitfield.""" def __init__(self, initial_bitfield=0, flags=None, **kw): """Setup bitfield trait. :param initial_bitfield: (int) initial value for the bitfield Flags that are set will be OR'ed with this. :param flags: List of dictionaries defining bitflags to set depending on data in the notification. Each one has the following keys: path: jsonpath of field to match. bit: (int) number of bit to set (lsb is bit 0) value: set bit if corresponding field's value matches this. 
If value is not provided, bit will be set if the field exists (and is non-null), regardless of its value. """ self.initial_bitfield = initial_bitfield if flags is None: flags = [] self.flags = flags super(BitfieldTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): matches = dict(match_list) bitfield = self.initial_bitfield for flagdef in self.flags: path = flagdef['path'] bit = 2 ** int(flagdef['bit']) if path in matches: if 'value' in flagdef: if matches[path] == flagdef['value']: bitfield |= bit else: bitfield |= bit return [bitfield] class TimedeltaPluginMissedFields(Exception): def __init__(self): msg = ('It is required to use two timestamp field with Timedelta ' 'plugin.') super(TimedeltaPluginMissedFields, self).__init__(msg) class TimedeltaPlugin(TraitPluginBase): """Setup timedelta meter volume of two timestamps fields. Example:: trait's fields definition: ['payload.created_at', 'payload.launched_at'] value is been created as total seconds between 'launched_at' and 'created_at' timestamps. """ # TODO(idegtiarov): refactor code to have meter_plugins separate from # trait_plugins def trait_value(self, match_list): if len(match_list) != 2: LOG.warning(_LW('Timedelta plugin is required two timestamp fields' ' to create timedelta value.')) return start, end = match_list try: start_time = timeutils.parse_isotime(start[1]) end_time = timeutils.parse_isotime(end[1]) except Exception as err: LOG.warning(_LW('Failed to parse date from set fields, both ' 'fields %(start)s and %(end)s must be datetime: ' '%(err)s') % dict(start=start[0], end=end[0], err=err) ) return return abs((end_time - start_time).total_seconds()) ceilometer-6.0.0/ceilometer/event/storage/0000775000567000056710000000000012701406364021726 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/event/storage/impl_sqlalchemy.py0000664000567000056710000004471712701406223025472 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
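# NOTE: this driver is normally selected through the [database]
# connection/event_connection URL scheme; e.g. a mysql://, postgresql://
# or sqlite:// URL is expected to resolve to this module via the
# 'ceilometer.event.storage' entry points.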
"""SQLAlchemy storage backend.""" from __future__ import absolute_import import datetime import os from oslo_config import cfg from oslo_db import exception as dbexc from oslo_db.sqlalchemy import session as db_session from oslo_log import log from oslo_utils import timeutils import sqlalchemy as sa from ceilometer.event.storage import base from ceilometer.event.storage import models as api_models from ceilometer.i18n import _LE, _LI from ceilometer import storage from ceilometer.storage.sqlalchemy import models from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText), (api_models.Trait.TEXT_TYPE, models.TraitText), (api_models.Trait.INT_TYPE, models.TraitInt), (api_models.Trait.FLOAT_TYPE, models.TraitFloat), (api_models.Trait.DATETIME_TYPE, models.TraitDatetime)] TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST) TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST) trait_models_dict = {'string': models.TraitText, 'integer': models.TraitInt, 'datetime': models.TraitDatetime, 'float': models.TraitFloat} def _build_trait_query(session, trait_type, key, value, op='eq'): trait_model = trait_models_dict[trait_type] op_dict = {'eq': (trait_model.value == value), 'lt': (trait_model.value < value), 'le': (trait_model.value <= value), 'gt': (trait_model.value > value), 'ge': (trait_model.value >= value), 'ne': (trait_model.value != value)} conditions = [trait_model.key == key, op_dict[op]] return (session.query(trait_model.event_id.label('ev_id')) .filter(*conditions)) class Connection(base.Connection): """Put the event data into a SQLAlchemy database. 
Tables:: - EventType - event definition - { id: event type id desc: description of event } - Event - event data - { id: event id message_id: message id generated = timestamp of event event_type_id = event type -> eventtype.id } - TraitInt - int trait value - { event_id: event -> event.id key: trait name value: integer value } - TraitDatetime - datetime trait value - { event_id: event -> event.id key: trait name value: datetime value } - TraitText - text trait value - { event_id: event -> event.id key: trait name value: text value } - TraitFloat - float trait value - { event_id: event -> event.id key: trait name value: float value } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, url): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function options = dict(cfg.CONF.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Ceilometer for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): # NOTE(gordc): to minimise memory, only import migration when needed from oslo_db.sqlalchemy import migration path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'storage', 'sqlalchemy', 'migrate_repo') migration.db_sync(self._engine_facade.get_engine(), path) def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): engine.execute(table.delete()) engine.dispose() def _get_or_create_event_type(self, event_type, session=None): """Check if an event type with the supplied name is already exists. If not, we create it and return the record. This may result in a flush. """ try: if session is None: session = self._engine_facade.get_session() with session.begin(subtransactions=True): et = session.query(models.EventType).filter( models.EventType.desc == event_type).first() if not et: et = models.EventType(event_type) session.add(et) except dbexc.DBDuplicateEntry: et = self._get_or_create_event_type(event_type, session) return et def record_events(self, event_models): """Write the events to SQL database via sqlalchemy. :param event_models: a list of model.Event objects. 
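        A sketch of one such object (hypothetical values), shaped the
        same way the notification converter builds them::

            api_models.Event('9a3c6de0-1d1f-4f6a-9f5e-2a2b6d7c1e10',
                             'compute.instance.create.end',
                             timeutils.utcnow(),
                             [api_models.Trait('host',
                                               api_models.Trait.TEXT_TYPE,
                                               'compute-1')],
                             {})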
""" session = self._engine_facade.get_session() error = None for event_model in event_models: event = None try: with session.begin(): event_type = self._get_or_create_event_type( event_model.event_type, session=session) event = models.Event(event_model.message_id, event_type, event_model.generated, event_model.raw) session.add(event) session.flush() if event_model.traits: trait_map = {} for trait in event_model.traits: if trait_map.get(trait.dtype) is None: trait_map[trait.dtype] = [] trait_map[trait.dtype].append( {'event_id': event.id, 'key': trait.name, 'value': trait.value}) for dtype in trait_map.keys(): model = TRAIT_ID_TO_MODEL[dtype] session.execute(model.__table__.insert(), trait_map[dtype]) except dbexc.DBDuplicateEntry as e: LOG.info(_LI("Duplicate event detected, skipping it: %s") % e) except KeyError as e: LOG.exception(_LE('Failed to record event: %s') % e) except Exception as e: LOG.exception(_LE('Failed to record event: %s') % e) error = e if error: raise error def get_events(self, event_filter, limit=None): """Return an iterable of model.Event objects. :param event_filter: EventFilter instance """ if limit == 0: return session = self._engine_facade.get_session() with session.begin(): # Build up the join conditions event_join_conditions = [models.EventType.id == models.Event.event_type_id] if event_filter.event_type: event_join_conditions.append(models.EventType.desc == event_filter.event_type) # Build up the where conditions event_filter_conditions = [] if event_filter.message_id: event_filter_conditions.append( models.Event.message_id == event_filter.message_id) if event_filter.start_timestamp: event_filter_conditions.append( models.Event.generated >= event_filter.start_timestamp) if event_filter.end_timestamp: event_filter_conditions.append( models.Event.generated <= event_filter.end_timestamp) trait_subq = None # Build trait filter if event_filter.traits_filter: filters = list(event_filter.traits_filter) trait_filter = filters.pop() key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] trait_subq = _build_trait_query(session, trait_type, key, value, op) for trait_filter in filters: key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] q = _build_trait_query(session, trait_type, key, value, op) trait_subq = trait_subq.filter( trait_subq.subquery().c.ev_id == q.subquery().c.ev_id) trait_subq = trait_subq.subquery() query = (session.query(models.Event.id) .join(models.EventType, sa.and_(*event_join_conditions))) if trait_subq is not None: query = query.join(trait_subq, trait_subq.c.ev_id == models.Event.id) if event_filter.admin_proj: no_proj_q = session.query(models.TraitText.event_id).filter( models.TraitText.key == 'project_id') admin_q = (session.query(models.TraitText.event_id).filter( ~sa.exists().where(models.TraitText.event_id == no_proj_q.subquery().c.event_id)).union( session.query(models.TraitText.event_id).filter(sa.and_( models.TraitText.key == 'project_id', models.TraitText.value == event_filter.admin_proj, models.Event.id == models.TraitText.event_id)))) query = query.filter(sa.exists().where( models.Event.id == admin_q.subquery().c.trait_text_event_id)) if event_filter_conditions: query = query.filter(sa.and_(*event_filter_conditions)) query = query.order_by(models.Event.generated).limit(limit) event_list = {} # get a list of all events that match filters for (id_, generated, message_id, desc, raw) in query.add_columns( 
models.Event.generated, models.Event.message_id, models.EventType.desc, models.Event.raw).all(): event_list[id_] = api_models.Event(message_id, desc, generated, [], raw) # Query all traits related to events. # NOTE (gordc): cast is done because pgsql defaults to TEXT when # handling unknown values such as null. trait_q = ( session.query( models.TraitDatetime.event_id, models.TraitDatetime.key, models.TraitDatetime.value, sa.cast(sa.null(), sa.Integer), sa.cast(sa.null(), sa.Float(53)), sa.cast(sa.null(), sa.String(255))) .filter(sa.exists().where( models.TraitDatetime.event_id == query.subquery().c.id)) ).union_all( session.query( models.TraitInt.event_id, models.TraitInt.key, sa.null(), models.TraitInt.value, sa.null(), sa.null()) .filter(sa.exists().where( models.TraitInt.event_id == query.subquery().c.id)), session.query( models.TraitFloat.event_id, models.TraitFloat.key, sa.null(), sa.null(), models.TraitFloat.value, sa.null()) .filter(sa.exists().where( models.TraitFloat.event_id == query.subquery().c.id)), session.query( models.TraitText.event_id, models.TraitText.key, sa.null(), sa.null(), sa.null(), models.TraitText.value) .filter(sa.exists().where( models.TraitText.event_id == query.subquery().c.id))) for id_, key, t_date, t_int, t_float, t_text in ( trait_q.order_by(models.TraitDatetime.key)).all(): if t_int is not None: dtype = api_models.Trait.INT_TYPE val = t_int elif t_float is not None: dtype = api_models.Trait.FLOAT_TYPE val = t_float elif t_date is not None: dtype = api_models.Trait.DATETIME_TYPE val = t_date else: dtype = api_models.Trait.TEXT_TYPE val = t_text try: trait_model = api_models.Trait(key, dtype, val) event_list[id_].append_trait(trait_model) except KeyError: # NOTE(gordc): this is expected as we do not set REPEATABLE # READ (bug 1506717). if query is run while recording new # event data, trait query may return more data than event # query. they can be safely discarded. pass return event_list.values() def get_event_types(self): """Return all event types as an iterable of strings.""" session = self._engine_facade.get_session() with session.begin(): query = (session.query(models.EventType.desc). order_by(models.EventType.desc)) for name in query.all(): # The query returns a tuple with one element. yield name[0] def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .distinct()) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for row in query.all(): yield {'name': row[0], 'data_type': dtype} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. 
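        A hypothetical call::

            for trait in conn.get_traits('compute.instance.create.end',
                                         trait_type='host'):
                print(trait.name, trait.dtype, trait.value)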
:param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key, trait_model.value) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .order_by(trait_model.key)) if trait_type: query = query.filter(trait_model.key == trait_type) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for k, v in query.all(): yield api_models.Trait(name=k, dtype=dtype, value=v) def clear_expired_event_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ session = self._engine_facade.get_session() with session.begin(): end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) event_q = (session.query(models.Event.id) .filter(models.Event.generated < end)) event_subq = event_q.subquery() for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: (session.query(trait_model) .filter(trait_model.event_id.in_(event_subq)) .delete(synchronize_session="fetch")) event_rows = event_q.delete() # remove EventType and TraitType with no corresponding # matching events and traits (session.query(models.EventType) .filter(~models.EventType.events.any()) .delete(synchronize_session="fetch")) LOG.info(_LI("%d events are removed from database"), event_rows) ceilometer-6.0.0/ceilometer/event/storage/__init__.py0000664000567000056710000000000012701406223024017 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/event/storage/impl_log.py0000664000567000056710000000203512701406223024074 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.event.storage import base from ceilometer.i18n import _LI LOG = log.getLogger(__name__) class Connection(base.Connection): """Log event data.""" @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ LOG.info(_LI("Dropping event data with TTL %d"), ttl) ceilometer-6.0.0/ceilometer/event/storage/impl_elasticsearch.py0000664000567000056710000002776012701406223026141 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import operator import elasticsearch as es from elasticsearch import helpers from oslo_log import log from oslo_utils import netutils from oslo_utils import timeutils import six from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LI from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Put the event data into an ElasticSearch db. Events in ElasticSearch are indexed by day and stored by event_type. An example document:: {"_index":"events_2014-10-21", "_type":"event_type0", "_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779", "_score":1.0, "_source":{"timestamp": "2014-10-21T20:02:09.274797" "traits": {"id4_0": "2014-10-21T20:02:09.274797", "id3_0": 0.7510790937279408, "id2_0": 5, "id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"} } } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) index_name = 'events' # NOTE(gordc): mainly for testing, data is not searchable after write, # it is only searchable after periodic refreshes. _refresh_on_write = False def __init__(self, url): url_split = netutils.urlsplit(url) self.conn = es.Elasticsearch(url_split.netloc) def upgrade(self): iclient = es.client.IndicesClient(self.conn) ts_template = { 'template': '*', 'mappings': {'_default_': {'_timestamp': {'enabled': True, 'store': True}, 'properties': {'traits': {'type': 'nested'}}}}} iclient.put_template(name='enable_timestamp', body=ts_template) def record_events(self, events): def _build_bulk_index(event_list): for ev in event_list: traits = {t.name: t.value for t in ev.traits} yield {'_op_type': 'create', '_index': '%s_%s' % (self.index_name, ev.generated.date().isoformat()), '_type': ev.event_type, '_id': ev.message_id, '_source': {'timestamp': ev.generated.isoformat(), 'traits': traits, 'raw': ev.raw}} error = None for ok, result in helpers.streaming_bulk( self.conn, _build_bulk_index(events)): if not ok: __, result = result.popitem() if result['status'] == 409: LOG.info(_LI('Duplicate event detected, skipping it: %s') % result) else: LOG.exception(_LE('Failed to record event: %s') % result) error = storage.StorageUnknownWriteError(result) if self._refresh_on_write: self.conn.indices.refresh(index='%s_*' % self.index_name) while self.conn.cluster.pending_tasks(local=True)['tasks']: pass if error: raise error def _make_dsl_from_filter(self, indices, ev_filter): q_args = {} filters = [] if ev_filter.start_timestamp: filters.append({'range': {'timestamp': {'ge': ev_filter.start_timestamp.isoformat()}}}) while indices[0] < ( '%s_%s' % (self.index_name, ev_filter.start_timestamp.date().isoformat())): del indices[0] if ev_filter.end_timestamp: filters.append({'range': {'timestamp': {'le': ev_filter.end_timestamp.isoformat()}}}) while indices[-1] > ( '%s_%s' % (self.index_name, ev_filter.end_timestamp.date().isoformat())): del indices[-1] q_args['index'] = indices if ev_filter.event_type: q_args['doc_type'] = ev_filter.event_type if ev_filter.message_id: filters.append({'term': {'_id': ev_filter.message_id}}) if ev_filter.traits_filter or ev_filter.admin_proj: 
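            # A hedged illustration of the traits_filter entries this block
            # consumes (the key and value below are assumptions, not a
            # documented contract). Each entry holds exactly one typed value
            # field ('integer', 'string', 'float' or 'datetime') plus an
            # optional 'op', e.g.:
            #
            #     [{'key': 'resource_id', 'string': 'inst-0001', 'op': 'eq'}]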
trait_filters = [] or_cond = [] for t_filter in ev_filter.traits_filter or []: value = None for val_type in ['integer', 'string', 'float', 'datetime']: if t_filter.get(val_type): value = t_filter.get(val_type) if isinstance(value, six.string_types): value = value.lower() elif isinstance(value, datetime.datetime): value = value.isoformat() break if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']: op = (t_filter.get('op').replace('ge', 'gte') .replace('le', 'lte')) trait_filters.append( {'range': {t_filter['key']: {op: value}}}) else: tf = {"query": {"query_string": { "query": "%s: \"%s\"" % (t_filter['key'], value)}}} if t_filter.get('op') == 'ne': tf = {"not": tf} trait_filters.append(tf) if ev_filter.admin_proj: or_cond = [{'missing': {'field': 'project_id'}}, {'term': {'project_id': ev_filter.admin_proj}}] filters.append( {'nested': {'path': 'traits', 'query': {'filtered': { 'filter': {'bool': {'must': trait_filters, 'should': or_cond}}}}}}) q_args['body'] = {'query': {'filtered': {'filter': {'bool': {'must': filters}}}}} return q_args def get_events(self, event_filter, limit=None): if limit == 0: return iclient = es.client.IndicesClient(self.conn) indices = iclient.get_mapping('%s_*' % self.index_name).keys() if indices: filter_args = self._make_dsl_from_filter(indices, event_filter) if limit is not None: filter_args['size'] = limit results = self.conn.search(fields=['_id', 'timestamp', '_type', '_source'], sort='timestamp:asc', **filter_args) trait_mappings = {} for record in results['hits']['hits']: trait_list = [] if not record['_type'] in trait_mappings: trait_mappings[record['_type']] = list( self.get_trait_types(record['_type'])) for key in record['_source']['traits'].keys(): value = record['_source']['traits'][key] for t_map in trait_mappings[record['_type']]: if t_map['name'] == key: dtype = t_map['data_type'] break else: dtype = models.Trait.TEXT_TYPE trait_list.append(models.Trait( name=key, dtype=dtype, value=models.Trait.convert_value(dtype, value))) gen_ts = timeutils.normalize_time(timeutils.parse_isotime( record['_source']['timestamp'])) yield models.Event(message_id=record['_id'], event_type=record['_type'], generated=gen_ts, traits=sorted( trait_list, key=operator.attrgetter('dtype')), raw=record['_source']['raw']) def get_event_types(self): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = set() for index in es_mappings.keys(): for ev_type in es_mappings[index]['mappings'].keys(): seen_types.add(ev_type) # TODO(gordc): tests assume sorted ordering but backends are not # explicitly ordered. 
# NOTE: _default_ is a type that appears in all mappings but is not # real 'type' seen_types.discard('_default_') return sorted(list(seen_types)) @staticmethod def _remap_es_types(d_type): if d_type == 'string': d_type = 'text' elif d_type == 'long': d_type = 'int' elif d_type == 'double': d_type = 'float' elif d_type == 'date' or d_type == 'date_time': d_type = 'datetime' return d_type def get_trait_types(self, event_type): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = [] for index in es_mappings.keys(): # if event_type exists in index and has traits if (es_mappings[index]['mappings'].get(event_type) and es_mappings[index]['mappings'][event_type]['properties'] ['traits'].get('properties')): for t_type in (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'].keys()): d_type = (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'] [t_type]['type']) d_type = models.Trait.get_type_by_name( self._remap_es_types(d_type)) if (t_type, d_type) not in seen_types: yield {'name': t_type, 'data_type': d_type} seen_types.append((t_type, d_type)) def get_traits(self, event_type, trait_type=None): t_types = dict((res['name'], res['data_type']) for res in self.get_trait_types(event_type)) if not t_types or (trait_type and trait_type not in t_types.keys()): return result = self.conn.search('%s_*' % self.index_name, event_type) for ev in result['hits']['hits']: if trait_type and ev['_source']['traits'].get(trait_type): yield models.Trait( name=trait_type, dtype=t_types[trait_type], value=models.Trait.convert_value( t_types[trait_type], ev['_source']['traits'][trait_type])) else: for trait in ev['_source']['traits'].keys(): yield models.Trait( name=trait, dtype=t_types[trait], value=models.Trait.convert_value( t_types[trait], ev['_source']['traits'][trait])) ceilometer-6.0.0/ceilometer/event/storage/base.py0000664000567000056710000000623312701406223023210 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ceilometer class Connection(object): """Base class for event storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'events': {'query': {'simple': False}}, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, url): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def clear(): """Clear database.""" @staticmethod def record_events(events): """Write the events to the backend storage system. :param events: a list of model.Event objects. 
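
        A hedged sketch of what driver implementations receive (the
        identifiers and trait values are illustrative assumptions):

        .. code-block:: python

            import datetime

            from ceilometer.event.storage import models

            event = models.Event('9a3dc209-0c44-4f04-a4b3-07c0c8aca9dc',
                                 'compute.instance.exists',
                                 datetime.datetime.utcnow(),
                                 [models.Trait('state',
                                               models.Trait.TEXT_TYPE,
                                               'active')],
                                 {})
            conn.record_events([event])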
""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_events(event_filter, limit=None): """Return an iterable of model.Event objects.""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_event_types(): """Return all event types as an iterable of strings.""" raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_trait_types(event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ raise ceilometer.NotImplementedError('Events not implemented.') @staticmethod def get_traits(event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ raise ceilometer.NotImplementedError('Events not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ raise ceilometer.NotImplementedError('Clearing events not implemented') ceilometer-6.0.0/ceilometer/event/storage/impl_db2.py0000664000567000056710000000547712701406223023777 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """DB2 storage backend """ import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage from ceilometer.storage.mongo import utils as pymongo_utils class Connection(pymongo_base.Connection): """The db2 event storage for Ceilometer.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, url): # Since we are using pymongo, even though we are connecting to DB2 # we still have to make sure that the scheme which used to distinguish # db2 driver from mongodb driver be replaced so that pymongo will not # produce an exception on the scheme. url = url.replace('db2:', 'mongodb:', 1) self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.2 to use aggregate(), since we are using mongodb # as backend for test, the following code is necessary to make sure # that the test wont try aggregate on older mongodb during the test. # For db2, the versionArray won't be part of the server_info, so there # will not be exception when real db2 gets used as backend. 
server_info = self.conn.server_info() if server_info.get('sysInfo'): self._using_mongodb = True else: self._using_mongodb = False if self._using_mongodb and server_info.get('versionArray') < [2, 2]: raise storage.StorageBadVersion("Need at least MongoDB 2.2") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) self.upgrade() def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): self.db.conn.create_collection('event') def clear(self): # drop_database command does nothing on db2 database since this has # not been implemented. However calling this method is important for # removal of all the empty dbs created during the test runs since # test run is against mongodb on Jenkins self.conn.drop_database(self.db.name) self.conn.close() ceilometer-6.0.0/ceilometer/event/storage/impl_hbase.py0000664000567000056710000002114412701406223024377 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_log import log from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import utils as hbase_utils from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the event data into a HBase database Collections: - events: - row_key: timestamp of event's generation + uuid of event in format: "%s:%s" % (ts, Event.message_id) - Column Families: f: contains the following qualifiers: - event_type: description of event's type - timestamp: time stamp of event generation - all traits for this event in format: .. code-block:: python "%s:%s" % (trait_name, trait_type) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None EVENT_TABLE = "event" def __init__(self, url): super(Connection, self).__init__(url) def upgrade(self): tables = [self.EVENT_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.EVENT_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_events(self, event_models): """Write the events to Hbase. 
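
        A sketch of the row key construction used below, so that events
        sort by generation time (this mirrors the code in this method):

        .. code-block:: python

            row = hbase_utils.prepare_key(
                hbase_utils.timestamp(event_model.generated, reverse=False),
                event_model.message_id)
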
:param event_models: a list of models.Event objects. """ error = None with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) for event_model in event_models: # Row key consists of timestamp and message_id from # models.Event or purposes of storage event sorted by # timestamp in the database. ts = event_model.generated row = hbase_utils.prepare_key( hbase_utils.timestamp(ts, reverse=False), event_model.message_id) event_type = event_model.event_type traits = {} if event_model.traits: for trait in event_model.traits: key = hbase_utils.prepare_key(trait.name, trait.dtype) traits[key] = trait.value record = hbase_utils.serialize_entry(traits, event_type=event_type, timestamp=ts, raw=event_model.raw) try: events_table.put(row, record) except Exception as ex: LOG.exception(_LE("Failed to record event: %s") % ex) error = ex if error: raise error def get_events(self, event_filter, limit=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. """ if limit == 0: return q, start, stop = hbase_utils.make_events_query_from_filter( event_filter) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit) for event_id, data in gen: traits = [] events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_dtype = key traits.append(models.Trait(name=trait_name, dtype=int(trait_dtype), value=value)) ts, mess = event_id.split(':') yield models.Event( message_id=hbase_utils.unquote(mess), event_type=events_dict['event_type'], generated=events_dict['timestamp'], traits=sorted(traits, key=operator.attrgetter('dtype')), raw=events_dict['raw'] ) def get_event_types(self): """Return all event types as an iterable of strings.""" with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan() event_types = set() for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if not isinstance(key, tuple) and key.startswith('event_type'): if value not in event_types: event_types.add(value) yield value def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ q = hbase_utils.make_query(event_type=event_type) trait_names = set() with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key if trait_name not in trait_names: # Here we check that our method return only unique # trait types, for ex. if it is found the same trait # types in different events with equal event_type, # method will return only one trait type. It is # proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) data_type = models.Trait.type_names[int(trait_type)] yield {'name': trait_name, 'data_type': data_type} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. 
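
        A hedged usage sketch (the HBase URL and event type are
        illustrative assumptions):

        .. code-block:: python

            conn = Connection('hbase://hbase-host:9090')
            for trait in conn.get_traits('compute.instance.create.end',
                                         trait_type='instance_id'):
                print(trait.name, trait.value)
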
:param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ q = hbase_utils.make_query(event_type=event_type, trait_type=trait_type) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key yield models.Trait(name=trait_name, dtype=int(trait_type), value=value) ceilometer-6.0.0/ceilometer/event/storage/impl_mongodb.py0000664000567000056710000000640612701406223024746 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """MongoDB storage backend""" from oslo_config import cfg from oslo_log import log import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage from ceilometer.storage import impl_mongodb from ceilometer.storage.mongo import utils as pymongo_utils LOG = log.getLogger(__name__) class Connection(pymongo_base.Connection): """Put the event data into a MongoDB database.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, url): # NOTE(jd) Use our own connection pooling on top of the Pymongo one. # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.4 to use $setOnInsert if self.conn.server_info()['versionArray'] < [2, 4]: raise storage.StorageBadVersion("Need at least MongoDB 2.4") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): self.db.conn.create_collection('event') # Establish indexes # NOTE(idegtiarov): This indexes cover get_events, get_event_types, and # get_trait_types requests based on event_type and timestamp fields. self.db.event.create_index( [('event_type', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING)], name='event_type_idx' ) ttl = cfg.CONF.database.event_time_to_live impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() @staticmethod def clear_expired_event_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. 
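
        Note that this driver deletes nothing here: expiry is delegated
        to the native MongoDB TTL index created by upgrade() (the
        ``event_ttl`` index on ``timestamp`` above), so this method only
        logs that the cleanup happens in the background.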
""" LOG.debug("Clearing expired event data is based on native " "MongoDB time to live feature and going in background.") ceilometer-6.0.0/ceilometer/event/storage/models.py0000664000567000056710000001015612701406223023560 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the events storage API. """ from oslo_utils import timeutils import six from ceilometer.storage import base def serialize_dt(value): """Serializes parameter if it is datetime.""" return value.isoformat() if hasattr(value, 'isoformat') else value class Event(base.Model): """A raw event from the source system. Events have Traits. Metrics will be derived from one or more Events. """ DUPLICATE = 1 UNKNOWN_PROBLEM = 2 INCOMPATIBLE_TRAIT = 3 def __init__(self, message_id, event_type, generated, traits, raw): """Create a new event. :param message_id: Unique ID for the message this event stemmed from. This is different than the Event ID, which comes from the underlying storage system. :param event_type: The type of the event. :param generated: UTC time for when the event occurred. :param traits: list of Traits on this Event. :param raw: Unindexed raw notification details. """ base.Model.__init__(self, message_id=message_id, event_type=event_type, generated=generated, traits=traits, raw=raw) def append_trait(self, trait_model): self.traits.append(trait_model) def __repr__(self): trait_list = [] if self.traits: trait_list = [six.text_type(trait) for trait in self.traits] return ("" % (self.message_id, self.event_type, self.generated, " ".join(trait_list))) def serialize(self): return {'message_id': self.message_id, 'event_type': self.event_type, 'generated': serialize_dt(self.generated), 'traits': [trait.serialize() for trait in self.traits], 'raw': self.raw} class Trait(base.Model): """A Trait is a key/value pair of data on an Event. The value is variant record of basic data types (int, date, float, etc). 
""" NONE_TYPE = 0 TEXT_TYPE = 1 INT_TYPE = 2 FLOAT_TYPE = 3 DATETIME_TYPE = 4 type_names = { NONE_TYPE: "none", TEXT_TYPE: "string", INT_TYPE: "integer", FLOAT_TYPE: "float", DATETIME_TYPE: "datetime" } def __init__(self, name, dtype, value): if not dtype: dtype = Trait.NONE_TYPE base.Model.__init__(self, name=name, dtype=dtype, value=value) def __repr__(self): return "" % (self.name, self.dtype, self.value) def serialize(self): return self.name, self.dtype, serialize_dt(self.value) def get_type_name(self): return self.get_name_by_type(self.dtype) @classmethod def get_type_by_name(cls, type_name): return getattr(cls, '%s_TYPE' % type_name.upper(), None) @classmethod def get_type_names(cls): return cls.type_names.values() @classmethod def get_name_by_type(cls, type_id): return cls.type_names.get(type_id, "none") @classmethod def convert_value(cls, trait_type, value): if trait_type is cls.INT_TYPE: return int(value) if trait_type is cls.FLOAT_TYPE: return float(value) if trait_type is cls.DATETIME_TYPE: return timeutils.normalize_time(timeutils.parse_isotime(value)) # Cropping the text value to match the TraitText value size if isinstance(value, six.binary_type): return value.decode('utf-8')[:255] return six.text_type(value)[:255] ceilometer-6.0.0/ceilometer/event/storage/pymongo_base.py0000664000567000056710000001353212701406223024760 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB and DB2 backends """ from oslo_log import log import pymongo from ceilometer.event.storage import base from ceilometer.event.storage import models from ceilometer.i18n import _LE, _LI from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer import utils LOG = log.getLogger(__name__) COMMON_AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base event Connection class for MongoDB and DB2 drivers.""" CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def record_events(self, event_models): """Write the events to database. :param event_models: a list of models.Event objects. 
""" error = None for event_model in event_models: traits = [] if event_model.traits: for trait in event_model.traits: traits.append({'trait_name': trait.name, 'trait_type': trait.dtype, 'trait_value': trait.value}) try: self.db.event.insert_one( {'_id': event_model.message_id, 'event_type': event_model.event_type, 'timestamp': event_model.generated, 'traits': traits, 'raw': event_model.raw}) except pymongo.errors.DuplicateKeyError as ex: LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex) except Exception as ex: LOG.exception(_LE("Failed to record event: %s") % ex) error = ex if error: raise error def get_events(self, event_filter, limit=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. :param limit: Maximum number of results to return. """ if limit == 0: return q = pymongo_utils.make_events_query_from_filter(event_filter) if limit is not None: results = self.db.event.find(q, limit=limit) else: results = self.db.event.find(q) for event in results: traits = [] for trait in event['traits']: traits.append(models.Trait(name=trait['trait_name'], dtype=int(trait['trait_type']), value=trait['trait_value'])) yield models.Event(message_id=event['_id'], event_type=event['event_type'], generated=event['timestamp'], traits=traits, raw=event.get('raw')) def get_event_types(self): """Return all event types as an iter of strings.""" return self.db.event.distinct('event_type') def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event. """ trait_names = set() events = self.db.event.find({'event_type': event_type}) for event in events: for trait in event['traits']: trait_name = trait['trait_name'] if trait_name not in trait_names: # Here we check that our method return only unique # trait types. Method will return only one trait type. It # is proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) yield {'name': trait_name, 'data_type': trait['trait_type']} def get_traits(self, event_type, trait_name=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_name: the name of the Trait to filter by """ if not trait_name: events = self.db.event.find({'event_type': event_type}) else: # We choose events that simultaneously have event_type and certain # trait_name, and retrieve events contains only mentioned traits. events = self.db.event.find({'$and': [{'event_type': event_type}, {'traits.trait_name': trait_name}]}, {'traits': {'$elemMatch': {'trait_name': trait_name}} }) for event in events: for trait in event['traits']: yield models.Trait(name=trait['trait_name'], dtype=trait['trait_type'], value=trait['trait_value']) ceilometer-6.0.0/ceilometer/event/endpoint.py0000664000567000056710000000527212701406224022455 0ustar jenkinsjenkins00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from oslo_context import context import oslo_messaging from stevedore import extension from ceilometer.event import converter as event_converter from ceilometer.i18n import _LE from ceilometer import messaging LOG = logging.getLogger(__name__) class EventsNotificationEndpoint(object): def __init__(self, manager): super(EventsNotificationEndpoint, self).__init__() LOG.debug('Loading event definitions') self.ctxt = context.get_admin_context() self.event_converter = event_converter.setup_events( extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) self.manager = manager def info(self, notifications): """Convert message at info level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notification('info', notifications) def error(self, notifications): """Convert message at error level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notification('error', notifications) def process_notification(self, priority, notifications): for notification in notifications: # NOTE: the rpc layer currently rips out the notification # delivery_info, which is critical to determining the # source of the notification. This will have to get added back # later. notification = messaging.convert_to_old_notification_format( priority, notification) try: event = self.event_converter.to_event(notification) if event is not None: with self.manager.publisher(self.ctxt) as p: p(event) except Exception: if not cfg.CONF.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE LOG.error(_LE('Fail to process a notification'), exc_info=True) return oslo_messaging.NotificationResult.HANDLED ceilometer-6.0.0/ceilometer/__init__.py0000664000567000056710000000146112701406223021246 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): # FIXME(jd) This is used by WSME to return a correct HTTP code. We should # not expose it here but wrap our methods in the API to convert it to a # proper HTTP error. code = 501 ceilometer-6.0.0/ceilometer/collector.py0000664000567000056710000001526112701406223021500 0ustar jenkinsjenkins00000000000000# # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from itertools import chain import socket import msgpack from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_service import service as os_service from oslo_utils import netutils from oslo_utils import units from ceilometer import dispatcher from ceilometer.i18n import _, _LE from ceilometer import messaging from ceilometer import utils OPTS = [ cfg.StrOpt('udp_address', default='0.0.0.0', help='Address to which the UDP socket is bound. Set to ' 'an empty string to disable.'), cfg.PortOpt('udp_port', default=4952, help='Port to which the UDP socket is bound.'), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'dispatching them'), cfg.IntOpt('batch_timeout', default=None, help='Number of seconds to wait before dispatching samples' 'when batch_size is not reached (None means indefinitely)'), ] cfg.CONF.register_opts(OPTS, group="collector") cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_opt('store_events', 'ceilometer.notification', group='notification') LOG = log.getLogger(__name__) class CollectorService(os_service.Service): """Listener for the collector service.""" def start(self): """Bind the UDP socket and handle incoming data.""" # ensure dispatcher is configured before starting other services dispatcher_managers = dispatcher.load_dispatcher_manager() (self.meter_manager, self.event_manager) = dispatcher_managers self.sample_listener = None self.event_listener = None super(CollectorService, self).start() if cfg.CONF.collector.udp_address: self.tg.add_thread(self.start_udp) transport = messaging.get_transport(optional=True) if transport: if list(self.meter_manager): sample_target = oslo_messaging.Target( topic=cfg.CONF.publisher_notifier.metering_topic) self.sample_listener = ( messaging.get_batch_notification_listener( transport, [sample_target], [SampleEndpoint(self.meter_manager)], allow_requeue=True, batch_size=cfg.CONF.collector.batch_size, batch_timeout=cfg.CONF.collector.batch_timeout)) self.sample_listener.start() if cfg.CONF.notification.store_events and list(self.event_manager): event_target = oslo_messaging.Target( topic=cfg.CONF.publisher_notifier.event_topic) self.event_listener = ( messaging.get_batch_notification_listener( transport, [event_target], [EventEndpoint(self.event_manager)], allow_requeue=True, batch_size=cfg.CONF.collector.batch_size, batch_timeout=cfg.CONF.collector.batch_timeout)) self.event_listener.start() if not cfg.CONF.collector.udp_address: # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) def start_udp(self): address_family = socket.AF_INET if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address): address_family = socket.AF_INET6 udp = socket.socket(address_family, socket.SOCK_DGRAM) udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) udp.bind((cfg.CONF.collector.udp_address, cfg.CONF.collector.udp_port)) self.udp_run = True while self.udp_run: # NOTE(jd) Arbitrary limit of 
64K because that ought to be # enough for anybody. data, source = udp.recvfrom(64 * units.Ki) try: sample = msgpack.loads(data, encoding='utf-8') except Exception: LOG.warning(_("UDP: Cannot decode data sent by %s"), source) else: try: LOG.debug("UDP: Storing %s", sample) self.meter_manager.map_method('record_metering_data', sample) except Exception: LOG.exception(_("UDP: Unable to store meter")) def stop(self): self.udp_run = False if self.sample_listener: utils.kill_listeners([self.sample_listener]) if self.event_listener: utils.kill_listeners([self.event_listener]) super(CollectorService, self).stop() def record_metering_data(self, context, data): """RPC endpoint for messages we send to ourselves. When the notification messages are re-published through the RPC publisher, this method receives them for processing. """ self.meter_manager.map_method('record_metering_data', data=data) class CollectorEndpoint(object): def __init__(self, dispatcher_manager): self.dispatcher_manager = dispatcher_manager def sample(self, messages): """RPC endpoint for notification messages When another service sends a notification over the message bus, this method receives it. """ samples = list(chain.from_iterable(m["payload"] for m in messages)) try: self.dispatcher_manager.map_method(self.method, samples) except Exception: LOG.exception(_LE("Dispatcher failed to handle the %s, " "requeue it."), self.ep_type) return oslo_messaging.NotificationResult.REQUEUE class SampleEndpoint(CollectorEndpoint): method = 'record_metering_data' ep_type = 'sample' class EventEndpoint(CollectorEndpoint): method = 'record_events' ep_type = 'event' ceilometer-6.0.0/ceilometer/cmd/0000775000567000056710000000000012701406364017704 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/cmd/__init__.py0000664000567000056710000000000012701406223021775 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/cmd/collector.py0000664000567000056710000000164612701406223022245 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_service import service as os_service from ceilometer import collector from ceilometer import service CONF = cfg.CONF def main(): service.prepare_service() os_service.launch(CONF, collector.CollectorService(), workers=CONF.collector.workers).wait() ceilometer-6.0.0/ceilometer/cmd/polling.py0000664000567000056710000000572412701406223021724 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_service import service as os_service from ceilometer.agent import manager from ceilometer.i18n import _LW from ceilometer import service LOG = log.getLogger(__name__) CONF = cfg.CONF class MultiChoicesOpt(cfg.Opt): def __init__(self, name, choices=None, **kwargs): super(MultiChoicesOpt, self).__init__( name, type=DeduplicatedCfgList(choices), **kwargs) self.choices = choices def _get_argparse_kwargs(self, group, **kwargs): """Extends the base argparse keyword dict for multi choices options.""" kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group) kwargs['nargs'] = '+' choices = kwargs.get('choices', self.choices) if choices: kwargs['choices'] = choices return kwargs class DeduplicatedCfgList(cfg.types.List): def __init__(self, choices=None, **kwargs): super(DeduplicatedCfgList, self).__init__(**kwargs) self.choices = choices or [] def __call__(self, *args, **kwargs): result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs) result_set = set(result) if len(result) != len(result_set): LOG.warning(_LW("Duplicated values: %s found in CLI options, " "auto de-duplicated"), result) result = list(result_set) if self.choices and not (result_set <= set(self.choices)): raise Exception('Valid values are %s, but found %s' % (self.choices, result)) return result CLI_OPTS = [ MultiChoicesOpt('polling-namespaces', default=['compute', 'central'], choices=['compute', 'central', 'ipmi'], dest='polling_namespaces', help='Polling namespace(s) to be used while ' 'resource polling'), MultiChoicesOpt('pollster-list', default=[], dest='pollster_list', help='List of pollsters (or wildcard templates) to be ' 'used while polling'), ] CONF.register_cli_opts(CLI_OPTS) def main(): service.prepare_service() os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces, CONF.pollster_list)).wait() ceilometer-6.0.0/ceilometer/cmd/sample.py0000664000567000056710000000625412701406224021541 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Copyright 2012-2014 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating meter for Ceilometer. 
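
A hedged invocation sketch (the option values are illustrative, and the
command name assumes the usual ``ceilometer-send-sample`` console script
entry point):

.. code-block:: console

    ceilometer-send-sample --sample-name image.size --sample-type gauge \
        --sample-unit B --sample-volume 12345 \
        --sample-resource 5b41a320-0c44-4f04-a4b3-07c0c8aca9dc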
""" import logging import sys from oslo_config import cfg from oslo_context import context from oslo_utils import timeutils from stevedore import extension from ceilometer import pipeline from ceilometer import sample from ceilometer import service def send_sample(): cfg.CONF.register_cli_opts([ cfg.StrOpt('sample-name', short='n', help='Meter name.', required=True), cfg.StrOpt('sample-type', short='y', help='Meter type (gauge, delta, cumulative).', default='gauge', required=True), cfg.StrOpt('sample-unit', short='U', help='Meter unit.'), cfg.IntOpt('sample-volume', short='l', help='Meter volume value.', default=1), cfg.StrOpt('sample-resource', short='r', help='Meter resource id.', required=True), cfg.StrOpt('sample-user', short='u', help='Meter user id.'), cfg.StrOpt('sample-project', short='p', help='Meter project id.'), cfg.StrOpt('sample-timestamp', short='i', help='Meter timestamp.', default=timeutils.utcnow().isoformat()), cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'), ]) service.prepare_service() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) pipeline_manager = pipeline.setup_pipeline( extension.ExtensionManager('ceilometer.transformer')) with pipeline_manager.publisher(context.get_admin_context()) as p: p([sample.Sample( name=cfg.CONF.sample_name, type=cfg.CONF.sample_type, unit=cfg.CONF.sample_unit, volume=cfg.CONF.sample_volume, user_id=cfg.CONF.sample_user, project_id=cfg.CONF.sample_project, resource_id=cfg.CONF.sample_resource, timestamp=cfg.CONF.sample_timestamp, resource_metadata=cfg.CONF.sample_metadata and eval( cfg.CONF.sample_metadata))]) ceilometer-6.0.0/ceilometer/cmd/agent_notification.py0000664000567000056710000000166212701406223024121 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_service import service as os_service from ceilometer import notification from ceilometer import service CONF = cfg.CONF def main(): service.prepare_service() os_service.launch(CONF, notification.NotificationService(), workers=CONF.notification.workers).wait() ceilometer-6.0.0/ceilometer/cmd/api.py0000664000567000056710000000134712701406223021026 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.api import app from ceilometer import service def main(): service.prepare_service() app.build_server() ceilometer-6.0.0/ceilometer/cmd/storage.py0000664000567000056710000000342712701406223021722 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from ceilometer.i18n import _LI from ceilometer import service from ceilometer import storage LOG = logging.getLogger(__name__) def dbsync(): service.prepare_service() storage.get_connection_from_config(cfg.CONF, 'metering').upgrade() storage.get_connection_from_config(cfg.CONF, 'event').upgrade() def expirer(): service.prepare_service() if cfg.CONF.database.metering_time_to_live > 0: LOG.debug("Clearing expired metering data") storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering') storage_conn.clear_expired_metering_data( cfg.CONF.database.metering_time_to_live) else: LOG.info(_LI("Nothing to clean, database metering time to live " "is disabled")) if cfg.CONF.database.event_time_to_live > 0: LOG.debug("Clearing expired event data") event_conn = storage.get_connection_from_config(cfg.CONF, 'event') event_conn.clear_expired_event_data( cfg.CONF.database.event_time_to_live) else: LOG.info(_LI("Nothing to clean, database event time to live " "is disabled")) ceilometer-6.0.0/ceilometer/conf/0000775000567000056710000000000012701406364020066 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/conf/__init__.py0000664000567000056710000000000012701406223022157 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/conf/defaults.py0000664000567000056710000000335012701406223022242 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
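# A hedged usage note (the calling convention is an assumption, not part
# of this module): services are expected to call
# set_cors_middleware_defaults() below before oslo.middleware's CORS
# filter is loaded, e.g.:
#
#     from ceilometer.conf import defaults
#     defaults.set_cors_middleware_defaults()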
from oslo_config import cfg from oslo_middleware import cors def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-Openstack-Request-Id'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-Openstack-Request-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ceilometer-6.0.0/ceilometer/declarative.py0000664000567000056710000001451712701406223022000 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from jsonpath_rw_ext import parser from oslo_config import cfg from oslo_log import log import six import yaml from ceilometer.i18n import _, _LI LOG = log.getLogger(__name__) class DefinitionException(Exception): def __init__(self, message, definition_cfg): super(DefinitionException, self).__init__(message) self.definition_cfg = definition_cfg class Definition(object): JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() GETTERS_CACHE = {} def __init__(self, name, cfg, plugin_manager): self.cfg = cfg self.name = name self.plugin = None if isinstance(cfg, dict): if 'fields' not in cfg: raise DefinitionException( _("The field 'fields' is required for %s") % name, self.cfg) if 'plugin' in cfg: plugin_cfg = cfg['plugin'] if isinstance(plugin_cfg, six.string_types): plugin_name = plugin_cfg plugin_params = {} else: try: plugin_name = plugin_cfg['name'] except KeyError: raise DefinitionException( _('Plugin specified, but no plugin name supplied ' 'for %s') % name, self.cfg) plugin_params = plugin_cfg.get('parameters') if plugin_params is None: plugin_params = {} try: plugin_ext = plugin_manager[plugin_name] except KeyError: raise DefinitionException( _('No plugin named %(plugin)s available for ' '%(name)s') % dict( plugin=plugin_name, name=name), self.cfg) plugin_class = plugin_ext.plugin self.plugin = plugin_class(**plugin_params) fields = cfg['fields'] else: # Simple definition "foobar: jsonpath" fields = cfg if isinstance(fields, list): # NOTE(mdragon): if not a string, we assume a list. 
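            # For illustration (the paths are assumptions): a list such as
            #     ['payload.instance_id', 'payload.instance_uuid']
            # collapses below to the single JSONPath expression
            #     '(payload.instance_id)|(payload.instance_uuid)'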
if len(fields) == 1: fields = fields[0] else: fields = '|'.join('(%s)' % path for path in fields) if isinstance(fields, six.integer_types): self.getter = fields else: try: self.getter = self.make_getter(fields) except Exception as e: raise DefinitionException( _("Parse error in JSONPath specification " "'%(jsonpath)s' for %(name)s: %(err)s") % dict(jsonpath=fields, name=name, err=e), self.cfg) def _get_path(self, match): if match.context is not None: for path_element in self._get_path(match.context): yield path_element yield str(match.path) def parse(self, obj, return_all_values=False): if callable(self.getter): values = self.getter(obj) else: return self.getter values = [match for match in values if return_all_values or match.value is not None] if self.plugin is not None: if return_all_values and not self.plugin.support_return_all_values: raise DefinitionException("Plugin %s does not allow " "returning multiple values" % self.cfg["plugin"]["name"], self.cfg) values_map = [('.'.join(self._get_path(match)), match.value) for match in values] values = [v for v in self.plugin.trait_values(values_map) if v is not None] else: values = [match.value for match in values if match is not None] if return_all_values: return values else: return values[0] if values else None def make_getter(self, fields): if fields in self.GETTERS_CACHE: return self.GETTERS_CACHE[fields] else: getter = self.JSONPATH_RW_PARSER.parse(fields).find self.GETTERS_CACHE[fields] = getter return getter def load_definitions(defaults, config_file, fallback_file=None): """Set up definitions from a YAML config file.""" if not os.path.exists(config_file): config_file = cfg.CONF.find_file(config_file) if not config_file and fallback_file is not None: LOG.debug("No Definitions configuration file found! " "Using default config.") config_file = fallback_file if config_file is not None: LOG.debug("Loading definitions configuration file: %s", config_file) with open(config_file) as cf: config = cf.read() try: definition_cfg = yaml.safe_load(config) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = (_("Invalid YAML syntax in Definitions file " "%(file)s at line: %(line)s, column: %(column)s.") % dict(file=config_file, line=mark.line + 1, column=mark.column + 1)) else: errmsg = (_("YAML error reading Definitions file " "%(file)s") % dict(file=config_file)) LOG.error(errmsg) raise else: LOG.debug("No Definitions configuration file found! " "Using default config.") definition_cfg = defaults LOG.info(_LI("Definitions: %s"), definition_cfg) return definition_cfg
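# Illustrative sketch (not part of the original module): how a simple
# "name: jsonpath" definition resolves against a notification body,
# mirroring what Definition.parse() does when no plugin is configured.
# The payload used here is hypothetical.
def _example_simple_definition():
    from jsonpath_rw_ext import parser as example_parser
    getter = example_parser.ExtentedJsonPathParser().parse('payload.size').find
    matches = getter({'payload': {'size': 42}})
    return [m.value for m in matches]  # -> [42]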
ceilometer-6.0.0/ceilometer/network/0000775000567000056710000000000012701406364020632 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/__init__.py0000664000567000056710000000000012701406223022723 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/services/0000775000567000056710000000000012701406364022455 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/services/fwaas.py0000664000567000056710000000577412701406223024137 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FirewallPollster(base.BaseServicesPollster): """Pollster to capture firewall status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'firewall_policy_id', ] @property def default_discovery(self): return 'fw_services' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall : %s" % fw) status = self.get_status_id(fw['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on fw %(id)s, " "skipping sample") % {'stat': fw['status'], 'id': fw['id']}) continue yield sample.Sample( name='network.services.firewall', type=sample.TYPE_GAUGE, unit='firewall', volume=status, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(fw) ) class FirewallPolicyPollster(base.BaseServicesPollster): """Pollster to capture firewall policy samples.""" FIELDS = ['name', 'description', 'firewall_rules', 'shared', 'audited', ] @property def default_discovery(self): return 'fw_policy' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall Policy: %s" % fw) yield sample.Sample( name='network.services.firewall.policy', type=sample.TYPE_GAUGE, unit='firewall_policy', volume=1, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(fw) ) ceilometer-6.0.0/ceilometer/network/services/lbaas.py0000664000567000056710000003623712701406223024116 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
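# Illustrative sketch (not part of the original module): the pollsters in
# this module publish the mapped status integer as the sample volume. A
# minimal stand-in for the v2 lookup done by
# BaseLBPollster.get_load_balancer_status_id() further below:
def _example_v2_status_lookup(operating_status):
    # mirrors LOAD_BALANCER_STATUS_V2; -1 means unknown, and the caller
    # skips the sample in that case
    status_map = {'offline': 0, 'online': 1, 'no_monitor': 3,
                  'error': 4, 'degraded': 5}
    return status_map.get(operating_status.lower(), -1)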
import abc import collections from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import neutron_client from ceilometer import sample LOG = log.getLogger(__name__) LBStatsData = collections.namedtuple( 'LBStats', ['active_connections', 'total_connections', 'bytes_in', 'bytes_out'] ) LOAD_BALANCER_STATUS_V2 = { 'offline': 0, 'online': 1, 'no_monitor': 3, 'error': 4, 'degraded': 5 } class BaseLBPollster(base.BaseServicesPollster): """Base Class for Load Balancer pollster""" def __init__(self): super(BaseLBPollster, self).__init__() self.lb_version = cfg.CONF.service_types.neutron_lbaas_version def get_load_balancer_status_id(self, value): if self.lb_version == 'v1': resource_status = self.get_status_id(value) elif self.lb_version == 'v2': status = value.lower() resource_status = LOAD_BALANCER_STATUS_V2.get(status, -1) return resource_status class LBPoolPollster(BaseLBPollster): """Pollster to capture Load Balancer pool status samples.""" FIELDS = ['admin_state_up', 'description', 'lb_method', 'name', 'protocol', 'provider', 'status', 'status_description', 'subnet_id', 'vip_id' ] @property def default_discovery(self): return 'lb_pools' def get_samples(self, manager, cache, resources): resources = resources or [] for pool in resources: LOG.debug("Load Balancer Pool : %s" % pool) status = self.get_load_balancer_status_id(pool['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on pool " "%(id)s, skipping sample") % {'stat': pool['status'], 'id': pool['id']}) continue yield sample.Sample( name='network.services.lb.pool', type=sample.TYPE_GAUGE, unit='pool', volume=status, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(pool) ) class LBVipPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Vip status samples.""" FIELDS = ['admin_state_up', 'address', 'connection_limit', 'description', 'name', 'pool_id', 'port_id', 'protocol', 'protocol_port', 'status', 'status_description', 'subnet_id', 'session_persistence', ] @property def default_discovery(self): return 'lb_vips' def get_samples(self, manager, cache, resources): resources = resources or [] for vip in resources: LOG.debug("Load Balancer Vip : %s" % vip) status = self.get_status_id(vip['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vip " "%(id)s, skipping sample") % {'stat': vip['status'], 'id': vip['id']}) continue yield sample.Sample( name='network.services.lb.vip', type=sample.TYPE_GAUGE, unit='vip', volume=status, user_id=None, project_id=vip['tenant_id'], resource_id=vip['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(vip) ) class LBMemberPollster(BaseLBPollster): """Pollster to capture Load Balancer Member status samples.""" FIELDS = ['admin_state_up', 'address', 'pool_id', 'protocol_port', 'status', 'status_description', 'weight', ] @property def default_discovery(self): return 'lb_members' def get_samples(self, manager, cache, resources): resources = resources or [] for member in resources: LOG.debug("Load Balancer Member : %s" % member) status = self.get_load_balancer_status_id(member['status']) if status == -1: LOG.warning(_("Unknown status %(stat)s received on member " "%(id)s, skipping sample") % {'stat': 
member['status'], 'id': member['id']}) continue yield sample.Sample( name='network.services.lb.member', type=sample.TYPE_GAUGE, unit='member', volume=status, user_id=None, project_id=member['tenant_id'], resource_id=member['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(member) ) class LBHealthMonitorPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Health probes status samples.""" FIELDS = ['admin_state_up', 'delay', 'max_retries', 'pools', 'timeout', 'type' ] @property def default_discovery(self): return 'lb_health_probes' def get_samples(self, manager, cache, resources): for probe in resources: LOG.debug("Load Balancer Health probe : %s" % probe) yield sample.Sample( name='network.services.lb.health_monitor', type=sample.TYPE_GAUGE, unit='health_monitor', volume=1, user_id=None, project_id=probe['tenant_id'], resource_id=probe['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(probe) ) @six.add_metaclass(abc.ABCMeta) class _LBStatsPollster(base.BaseServicesPollster): """Base Statistics pollster. It is capturing the statistics info and yielding samples for connections and bandwidth. """ def __init__(self): super(_LBStatsPollster, self).__init__() self.client = neutron_client.Client() self.lb_version = cfg.CONF.service_types.neutron_lbaas_version @staticmethod def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): if not resource_metadata: resource_metadata = {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, ) def _populate_stats_cache(self, pool_id, cache): i_cache = cache.setdefault("lbstats", {}) if pool_id not in i_cache: stats = self.client.pool_stats(pool_id)['stats'] i_cache[pool_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[pool_id] def _populate_stats_cache_v2(self, loadbalancer_id, cache): i_cache = cache.setdefault("lbstats", {}) if loadbalancer_id not in i_cache: stats = self.client.get_loadbalancer_stats(loadbalancer_id) i_cache[loadbalancer_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[loadbalancer_id] @property def default_discovery(self): discovery_resource = 'lb_pools' if self.lb_version == 'v2': discovery_resource = 'lb_loadbalancers' return discovery_resource @abc.abstractmethod def _get_sample(pool, c_data): """Return one Sample.""" def get_samples(self, manager, cache, resources): if self.lb_version == 'v1': for pool in resources: try: c_data = self._populate_stats_cache(pool['id'], cache) yield self._get_sample(pool, c_data) except Exception: LOG.exception(_('Ignoring pool %(pool_id)s'), {'pool_id': pool['id']}) elif self.lb_version == 'v2': for loadbalancer in resources: try: c_data = self._populate_stats_cache_v2(loadbalancer['id'], cache) yield self._get_sample(loadbalancer, c_data) except Exception: LOG.exception( _('Ignoring ' 'loadbalancer %(loadbalancer_id)s'), {'loadbalancer_id': loadbalancer['id']}) class LBActiveConnectionsPollster(_LBStatsPollster): """Pollster to capture Active Load Balancer connections.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( 
pool, name='network.services.lb.active.connections', type=sample.TYPE_GAUGE, unit='connection', volume=data.active_connections, ) class LBTotalConnectionsPollster(_LBStatsPollster): """Pollster to capture Total Load Balancer connections.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.total.connections', type=sample.TYPE_CUMULATIVE, unit='connection', volume=data.total_connections, ) class LBBytesInPollster(_LBStatsPollster): """Pollster to capture incoming bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.incoming.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_in, ) class LBBytesOutPollster(_LBStatsPollster): """Pollster to capture outgoing bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.outgoing.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_out, ) def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): resource_metadata = resource_metadata or {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, ) class LBListenerPollster(BaseLBPollster): """Pollster to capture Load Balancer Listener status samples.""" FIELDS = ['admin_state_up', 'connection_limit', 'description', 'name', 'default_pool_id', 'protocol', 'protocol_port', 'operating_status', 'loadbalancers' ] @property def default_discovery(self): return 'lb_listeners' def get_samples(self, manager, cache, resources): resources = resources or [] for listener in resources: LOG.debug("Load Balancer Listener : %s" % listener) status = self.get_load_balancer_status_id( listener['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on listener " "%(id)s, skipping sample") % {'stat': listener['operating_status'], 'id': listener['id']}) continue yield sample.Sample( name='network.services.lb.listener', type=sample.TYPE_GAUGE, unit='listener', volume=status, user_id=None, project_id=listener['tenant_id'], resource_id=listener['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(listener) ) class LBLoadBalancerPollster(BaseLBPollster): """Pollster to capture Load Balancer status samples.""" FIELDS = ['admin_state_up', 'description', 'vip_address', 'listeners', 'name', 'vip_subnet_id', 'operating_status', ] @property def default_discovery(self): return 'lb_loadbalancers' def get_samples(self, manager, cache, resources): resources = resources or [] for loadbalancer in resources: LOG.debug("Load Balancer: %s" % loadbalancer) status = self.get_load_balancer_status_id( loadbalancer['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received " "on Load Balancer " "%(id)s, skipping sample") % {'stat': loadbalancer['operating_status'], 'id': loadbalancer['id']}) continue yield sample.Sample( name='network.services.lb.loadbalancer', type=sample.TYPE_GAUGE, unit='loadbalancer', volume=status, user_id=None, project_id=loadbalancer['tenant_id'], resource_id=loadbalancer['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(loadbalancer) ) ceilometer-6.0.0/ceilometer/network/services/vpnaas.py0000664000567000056710000000640312701406223024314 0ustar 
jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class VPNServicesPollster(base.BaseServicesPollster): """Pollster to capture VPN status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'subnet_id', 'router_id' ] @property def default_discovery(self): return 'vpn_services' def get_samples(self, manager, cache, resources): resources = resources or [] for vpn in resources: LOG.debug("VPN : %s" % vpn) status = self.get_status_id(vpn['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vpn " "%(id)s, skipping sample") % {'stat': vpn['status'], 'id': vpn['id']}) continue yield sample.Sample( name='network.services.vpn', type=sample.TYPE_GAUGE, unit='vpnservice', volume=status, user_id=None, project_id=vpn['tenant_id'], resource_id=vpn['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(vpn) ) class IPSecConnectionsPollster(base.BaseServicesPollster): """Pollster to capture vpn ipsec connections status samples.""" FIELDS = ['name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'psk', 'initiator', 'ikepolicy_id', 'dpd', 'ipsecpolicy_id', 'vpnservice_id', 'mtu', 'admin_state_up', 'tenant_id' ] @property def default_discovery(self): return 'ipsec_connections' def get_samples(self, manager, cache, resources): resources = resources or [] for conn in resources: LOG.debug("IPSec Connection Info: %s" % conn) yield sample.Sample( name='network.services.vpn.connections', type=sample.TYPE_GAUGE, unit='ipsec_site_connection', volume=1, user_id=None, project_id=conn['tenant_id'], resource_id=conn['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_metadata(conn) ) ceilometer-6.0.0/ceilometer/network/services/__init__.py0000664000567000056710000000000012701406223024546 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/services/base.py0000664000567000056710000000232012701406223023730 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
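# Illustrative sketch (not part of the original module): how
# BaseServicesPollster.extract_metadata() below projects a resource dict
# onto a pollster's FIELDS list; the metric dict here is hypothetical.
def _example_extract_metadata():
    fields = ['admin_state_up', 'status']
    metric = {'admin_state_up': True, 'status': 'ACTIVE', 'id': 'dropped'}
    return dict((k, metric[k]) for k in fields)
    # -> {'admin_state_up': True, 'status': 'ACTIVE'}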
from ceilometer.agent import plugin_base # status map for converting metric status to volume int STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, } class BaseServicesPollster(plugin_base.PollsterBase): FIELDS = [] @staticmethod def _iter_cache(cache, meter_name, method): if meter_name not in cache: cache[meter_name] = list(method()) return iter(cache[meter_name]) def extract_metadata(self, metric): return dict((k, metric[k]) for k in self.FIELDS) @staticmethod def get_status_id(value): status = value.lower() return STATUS.get(status, -1) ceilometer-6.0.0/ceilometer/network/services/discovery.py0000664000567000056710000000703112701406223025031 0ustar jenkinsjenkins00000000000000# # Copyright (c) 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.agent import plugin_base from ceilometer import neutron_client class _BaseServicesDiscovery(plugin_base.DiscoveryBase): KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' def __init__(self): super(_BaseServicesDiscovery, self).__init__() self.neutron_cli = neutron_client.Client() class LBPoolsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" pools = self.neutron_cli.pool_get_all() return [i for i in pools if i.get('status') != 'error'] class LBVipsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vips = self.neutron_cli.vip_get_all() return [i for i in vips if i.get('status', None) != 'error'] class LBMembersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" members = self.neutron_cli.member_get_all() return [i for i in members if i.get('status', None) != 'error'] class LBListenersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer listener resources to monitor.""" listeners = self.neutron_cli.list_listener() return [i for i in listeners if i.get('operating_status', None) != 'error'] class LBLoadBalancersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer resources to monitor.""" loadbalancers = self.neutron_cli.list_loadbalancer() return [i for i in loadbalancers if i.get('operating_status', None) != 'error'] class LBHealthMonitorsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" probes = self.neutron_cli.health_monitor_get_all() return probes class VPNServicesDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vpnservices = self.neutron_cli.vpn_get_all() return [i for i in vpnservices if i.get('status', None) != 'error'] class IPSecConnectionsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" conns = self.neutron_cli.ipsec_site_connections_get_all() return conns class FirewallDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): 
"""Discover resources to monitor.""" fw = self.neutron_cli.firewall_get_all() return [i for i in fw if i.get('status', None) != 'error'] class FirewallPolicyDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" return self.neutron_cli.fw_policy_get_all() ceilometer-6.0.0/ceilometer/network/floatingip.py0000664000567000056710000000523312701406223023335 0ustar jenkinsjenkins00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # Copyright 2012 eNovance # Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _LW from ceilometer import neutron_client from ceilometer import sample LOG = log.getLogger(__name__) cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') class FloatingIPPollster(plugin_base.PollsterBase): STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, } def __init__(self): self.neutron_cli = neutron_client.Client() @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.neutron @staticmethod def _form_metadata_for_fip(fip): """Return a metadata dictionary for the fip usage data.""" metadata = { 'router_id': fip.get("router_id"), 'status': fip.get("status"), 'floating_network_id': fip.get("floating_network_id"), 'fixed_ip_address': fip.get("fixed_ip_address"), 'port_id': fip.get("port_id"), 'floating_ip_address': fip.get("floating_ip_address") } return metadata def get_samples(self, manager, cache, resources): for fip in self.neutron_cli.fip_get_all(): status = self.STATUS.get(fip['status'].lower()) if status is None: LOG.warning(_LW("Invalid status, skipping IP address %s") % fip['floating_ip_address']) continue res_metadata = self._form_metadata_for_fip(fip) yield sample.Sample( name='ip.floating', type=sample.TYPE_GAUGE, unit='ip', volume=status, user_id=fip.get('user_id'), project_id=fip['tenant_id'], resource_id=fip['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=res_metadata ) ceilometer-6.0.0/ceilometer/network/notifications.py0000664000567000056710000002124612701406223024054 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handler for producing network counter messages from Neutron notification events. 
""" from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('neutron_control_exchange', default='neutron', help="Exchange name for Neutron notifications."), ] cfg.CONF.register_opts(OPTS) class NetworkNotificationBase(plugin_base.NotificationBase): resource_name = None @property def event_types(self): return [ # NOTE(flwang): When the *.create.start notification sending, # there is no resource id assigned by Neutron yet. So we ignore # the *.create.start notification for now and only listen the # *.create.end to make sure the resource id is existed. '%s.create.end' % self.resource_name, '%s.update.*' % self.resource_name, '%s.exists' % self.resource_name, # FIXME(dhellmann): Neutron delete notifications do # not include the same metadata as the other messages, # so we ignore them for now. This isn't ideal, since # it may mean we miss charging for some amount of time, # but it is better than throwing away the existing # metadata for a resource when it is deleted. # '%s.delete.start' % (self.resource_name), ] def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=conf.neutron_control_exchange) for topic in self.get_notification_topics(conf)] def process_notification(self, message): counter_name = getattr(self, 'counter_name', self.resource_name) unit_value = getattr(self, 'unit', self.resource_name) resource = message['payload'].get(self.resource_name) if resource: # NOTE(liusheng): In %s.update.start notifications, the id is in # message['payload'] instead of resource itself. if message['event_type'].endswith('update.start'): resource['id'] = message['payload']['id'] resources = [resource] else: resources = message['payload'].get(self.resource_name + 's', []) resource_message = message.copy() for resource in resources: resource_message['payload'] = resource yield sample.Sample.from_notification( name=counter_name, type=sample.TYPE_GAUGE, unit=unit_value, volume=1, user_id=resource_message['_context_user_id'], project_id=resource_message['_context_tenant_id'], resource_id=resource['id'], message=resource_message) event_type_split = resource_message['event_type'].split('.') if len(event_type_split) > 2: yield sample.Sample.from_notification( name=counter_name + "." + event_type_split[1], type=sample.TYPE_DELTA, unit=unit_value, volume=1, user_id=resource_message['_context_user_id'], project_id=resource_message['_context_tenant_id'], resource_id=resource['id'], message=resource_message) class Network(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron network notifications. Handle network.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'network' class Subnet(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle subnet.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'subnet' class Port(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle port.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'port' class Router(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle router.{create.end|update.*|exists} notifications from neutron. 
""" resource_name = 'router' class FloatingIP(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle floatingip.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'floatingip' counter_name = 'ip.floating' unit = 'ip' class Pool(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle pool.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'pool' counter_name = 'network.services.lb.pool' class Vip(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle vip.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'vip' counter_name = 'network.services.lb.vip' class Member(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle member.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'member' counter_name = 'network.services.lb.member' class HealthMonitor(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle health_monitor.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'health_monitor' counter_name = 'network.services.lb.health_monitor' class Firewall(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle firewall.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall' counter_name = 'network.services.firewall' class FirewallPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle firewall_policy.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall_policy' counter_name = 'network.services.firewall.policy' class FirewallRule(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle firewall_rule.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'firewall_rule' counter_name = 'network.services.firewall.rule' class VPNService(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle vpnservice.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'vpnservice' counter_name = 'network.services.vpn' class IPSecPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle pool.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'ipsecpolicy' counter_name = 'network.services.vpn.ipsecpolicy' class IKEPolicy(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle ikepolicy.{create.end|update.*|exists} notifications from neutron. """ resource_name = 'ikepolicy' counter_name = 'network.services.vpn.ikepolicy' class IPSecSiteConnection(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): """Listen for Neutron notifications. Handle ipsec_site_connection.{create.end|update.*|exists} notifications from neutron. 
""" resource_name = 'ipsec_site_connection' counter_name = 'network.services.vpn.connections' ceilometer-6.0.0/ceilometer/network/statistics/0000775000567000056710000000000012701406364023024 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/statistics/opencontrail/0000775000567000056710000000000012701406364025521 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/statistics/opencontrail/__init__.py0000664000567000056710000000000012701406223027612 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/statistics/opencontrail/driver.py0000664000567000056710000001557512701406223027375 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from ceilometer.network.statistics import driver from ceilometer.network.statistics.opencontrail import client from ceilometer import neutron_client class OpencontrailDriver(driver.Driver): """Driver of network analytics of Opencontrail. This driver uses resources in "pipeline.yaml". Resource requires below conditions: * resource is url * scheme is "opencontrail" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of request url to Opencontrail Analytics endpoint. (default "http") * virtual_network Specify the virtual network. (default None) * fqdn_uuid: Specify the VM fqdn UUID. (default "*") * resource: The resource on which the counters are retrieved. (default "if_stats_list") * fip_stats_list: Traffic on floating ips * if_stats_list: Traffic on VM interfaces e.g.:: opencontrail://localhost:8081/?resource=fip_stats_list& virtual_network=default-domain:openstack:public """ @staticmethod def _prepare_cache(endpoint, params, cache): if 'network.statistics.opencontrail' in cache: return cache['network.statistics.opencontrail'] data = { 'o_client': client.Client(endpoint), 'n_client': neutron_client.Client() } cache['network.statistics.opencontrail'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) iter = self._get_iter(meter_name) if iter is None: # The extractor for this meter is not implemented or the API # doesn't have method to get this meter. return extractor = self._get_extractor(meter_name) if extractor is None: # The extractor for this meter is not implemented or the API # doesn't have method to get this meter. 
return data = self._prepare_cache(endpoint, params, cache) ports = data['n_client'].port_get_all() ports_map = dict((port['id'], port) for port in ports) resource = params.get('resource', ['if_stats_list'])[0] fqdn_uuid = params.get('fqdn_uuid', ['*'])[0] virtual_network = params.get('virtual_network', [None])[0] timestamp = timeutils.utcnow().isoformat() statistics = data['o_client'].networks.get_vm_statistics(fqdn_uuid) if not statistics: return for value in statistics['value']: for sample in iter(extractor, value, ports_map, resource, virtual_network): if sample is not None: yield sample + (timestamp, ) def _get_iter(self, meter_name): if meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _explode_name(fq_name): m = re.match( "(?P[^:]+):(?P.+):(?P[^:]+)", fq_name) if not m: return return m.group('domain'), m.group('project'), m.group('port_id') @staticmethod def _get_resource_meta(ports_map, stat, resource, network): if resource == 'fip_stats_list': if network and (network != stat['virtual_network']): return name = stat['iface_name'] else: name = stat['name'] domain, project, port_id = OpencontrailDriver._explode_name(name) port = ports_map.get(port_id) tenant_id = None network_id = None device_owner_id = None if port: tenant_id = port['tenant_id'] network_id = port['network_id'] device_owner_id = port['device_id'] resource_meta = {'device_owner_id': device_owner_id, 'network_id': network_id, 'project_id': tenant_id, 'project': project, 'resource': resource, 'domain': domain} return port_id, resource_meta @staticmethod def _iter_port(extractor, value, ports_map, resource, virtual_network=None): stats = value['value']['UveVirtualMachineAgent'].get(resource, []) for stat in stats: if type(stat) is list: for sub_stats, node in zip(*[iter(stat)] * 2): for sub_stat in sub_stats: result = OpencontrailDriver._get_resource_meta( ports_map, sub_stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(sub_stat, port_id, resource_meta) else: result = OpencontrailDriver._get_resource_meta( ports_map, stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(stat, port_id, resource_meta) @staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta): return int(statistic['in_pkts']), resource_id, resource_meta @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta): return int(statistic['out_pkts']), resource_id, resource_meta @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta): return int(statistic['in_bytes']), resource_id, resource_meta @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta): return int(statistic['out_bytes']), resource_id, resource_meta ceilometer-6.0.0/ceilometer/network/statistics/opencontrail/client.py0000664000567000056710000000710012701406223027341 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_log import log import requests import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ CONF = cfg.CONF CONF.import_opt('http_timeout', 'ceilometer.service') LOG = log.getLogger(__name__) class OpencontrailAPIFailed(Exception): pass class AnalyticsAPIBaseClient(object): """Opencontrail Base Statistics REST API Client.""" def __init__(self, endpoint, data): self.endpoint = endpoint self.data = data or {} def request(self, path, fqdn_uuid, data=None): req_data = copy.copy(self.data) if data: req_data.update(data) req_params = self._get_req_params(data=req_data) url = urlparse.urljoin(self.endpoint, path + fqdn_uuid) self._log_req(url, req_params) resp = requests.get(url, **req_params) self._log_res(resp) if resp.status_code != 200: raise OpencontrailAPIFailed( _('Opencontrail API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp def _get_req_params(self, data=None): req_params = { 'headers': { 'Accept': 'application/json' }, 'data': data, 'allow_redirects': False, 'timeout': CONF.http_timeout, } return req_params @staticmethod def _log_req(url, req_params): if not CONF.debug: return curl_command = ['REQ: curl -i -X GET '] params = [] for name, value in six.iteritems(req_params['data']): params.append("%s=%s" % (name, value)) curl_command.append('"%s?%s" ' % (url, '&'.join(params))) for name, value in six.iteritems(req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) @staticmethod def _log_res(resp): if not CONF.debug: return dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) class NetworksAPIClient(AnalyticsAPIBaseClient): """Opencontrail Statistics REST API Client.""" def get_vm_statistics(self, fqdn_uuid, data=None): """Get statistics of a virtual-machines. URL: {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid} """ path = '/analytics/uves/virtual-machine/' resp = self.request(path, fqdn_uuid, data) return resp.json() class Client(object): def __init__(self, endpoint, data=None): self.networks = NetworksAPIClient(endpoint, data) ceilometer-6.0.0/ceilometer/network/statistics/switch.py0000664000567000056710000000144512701406223024675 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
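# Illustrative pipeline.yaml snippet (hypothetical host and credentials):
# network statistics pollsters such as SWPollster below return None from
# default_discovery, so their resources are listed directly in the
# pipeline source, e.g.:
#
#     sources:
#         - name: switch_source
#           interval: 600
#           meters:
#               - "switch"
#           resources:
#               - opendaylight://127.0.0.1:8080/controller/nb/v2?auth=basic&user=admin&password=admin
#           sinks:
#               - meter_sink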
from ceilometer.network import statistics from ceilometer import sample class SWPollster(statistics._Base): meter_name = 'switch' meter_type = sample.TYPE_GAUGE meter_unit = 'switch' ceilometer-6.0.0/ceilometer/network/statistics/port.py0000664000567000056710000000541412701406223024360 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class PortPollster(statistics._Base): meter_name = 'switch.port' meter_type = sample.TYPE_GAUGE meter_unit = 'port' class PortPollsterReceivePackets(statistics._Base): meter_name = 'switch.port.receive.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitPackets(statistics._Base): meter_name = 'switch.port.transmit.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveBytes(statistics._Base): meter_name = 'switch.port.receive.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterTransmitBytes(statistics._Base): meter_name = 'switch.port.transmit.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterReceiveDrops(statistics._Base): meter_name = 'switch.port.receive.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitDrops(statistics._Base): meter_name = 'switch.port.transmit.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveErrors(statistics._Base): meter_name = 'switch.port.receive.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitErrors(statistics._Base): meter_name = 'switch.port.transmit.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveFrameErrors(statistics._Base): meter_name = 'switch.port.receive.frame_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveOverrunErrors(statistics._Base): meter_name = 'switch.port.receive.overrun_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveCRCErrors(statistics._Base): meter_name = 'switch.port.receive.crc_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterCollisionCount(statistics._Base): meter_name = 'switch.port.collision.count' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' ceilometer-6.0.0/ceilometer/network/statistics/flow.py0000664000567000056710000000263412701406223024344 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class FlowPollster(statistics._Base): meter_name = 'switch.flow' meter_type = sample.TYPE_GAUGE meter_unit = 'flow' class FlowPollsterDurationSeconds(statistics._Base): meter_name = 'switch.flow.duration_seconds' meter_type = sample.TYPE_GAUGE meter_unit = 's' class FlowPollsterDurationNanoseconds(statistics._Base): meter_name = 'switch.flow.duration_nanoseconds' meter_type = sample.TYPE_GAUGE meter_unit = 'ns' class FlowPollsterPackets(statistics._Base): meter_name = 'switch.flow.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class FlowPollsterBytes(statistics._Base): meter_name = 'switch.flow.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' ceilometer-6.0.0/ceilometer/network/statistics/__init__.py0000664000567000056710000000656112701406223025137 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
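# Illustrative sketch (not part of the original module): how
# _Base._parse_my_resource() below splits a pipeline resource URL into
# endpoint parts plus query parameters; the URL is hypothetical.
def _example_parse_resource():
    from oslo_utils import netutils as example_netutils
    from six.moves.urllib import parse as example_urlparse
    url = 'opendaylight://127.0.0.1:8080/controller/nb/v2?auth=basic'
    parse_url = example_netutils.urlsplit(url)
    params = example_urlparse.parse_qs(parse_url.query)
    return parse_url.scheme, parse_url.netloc, params
    # -> ('opendaylight', '127.0.0.1:8080', {'auth': ['basic']})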
import abc from oslo_utils import netutils import six from six.moves.urllib import parse as urlparse from stevedore import driver as _driver from ceilometer.agent import plugin_base from ceilometer import sample @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): NAMESPACE = 'network.statistics.drivers' drivers = {} @property def default_discovery(self): # this signifies that the pollster gets its resources from # elsewhere, in this case they're manually listed in the # pipeline configuration return None @abc.abstractproperty def meter_name(self): """Return a Meter Name.""" @abc.abstractproperty def meter_type(self): """Return a Meter Type.""" @abc.abstractproperty def meter_unit(self): """Return a Meter Unit.""" @staticmethod def _parse_my_resource(resource): parse_url = netutils.urlsplit(resource) params = urlparse.parse_qs(parse_url.query) parts = urlparse.ParseResult(parse_url.scheme, parse_url.netloc, parse_url.path, None, None, None) return parts, params @staticmethod def get_driver(scheme): if scheme not in _Base.drivers: _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE, scheme).driver() return _Base.drivers[scheme] def get_samples(self, manager, cache, resources): resources = resources or [] for resource in resources: parse_url, params = self._parse_my_resource(resource) ext = self.get_driver(parse_url.scheme) sample_data = ext.get_sample_data(self.meter_name, parse_url, params, cache) for data in sample_data or []: if data is None: continue if not isinstance(data, list): data = [data] for (volume, resource_id, resource_metadata, timestamp) in data: yield sample.Sample( name=self.meter_name, type=self.meter_type, unit=self.meter_unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, timestamp=timestamp, resource_metadata=resource_metadata ) ceilometer-6.0.0/ceilometer/network/statistics/driver.py0000664000567000056710000000163612701406223024671 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class Driver(object): @abc.abstractmethod def get_sample_data(self, meter_name, parse_url, params, cache): """Return volume, resource_id, resource_metadata, timestamp in tuple. If not implemented for meter_name, returns None """ ceilometer-6.0.0/ceilometer/network/statistics/table.py0000664000567000056710000000242712701406223024464 0ustar jenkinsjenkins00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class TablePollster(statistics._Base): meter_name = 'switch.table' meter_type = sample.TYPE_GAUGE meter_unit = 'table' class TablePollsterActiveEntries(statistics._Base): meter_name = 'switch.table.active.entries' meter_type = sample.TYPE_GAUGE meter_unit = 'entry' class TablePollsterLookupPackets(statistics._Base): meter_name = 'switch.table.lookup.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' class TablePollsterMatchedPackets(statistics._Base): meter_name = 'switch.table.matched.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' ceilometer-6.0.0/ceilometer/network/statistics/opendaylight/0000775000567000056710000000000012701406364025513 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/statistics/opendaylight/__init__.py0000664000567000056710000000000012701406223027604 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/network/statistics/opendaylight/driver.py0000664000567000056710000004246412701406224027365 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils import six from six import moves from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer.network.statistics import driver from ceilometer.network.statistics.opendaylight import client from ceilometer import utils LOG = log.getLogger(__name__) def _get_properties(properties, prefix='properties'): resource_meta = {} if properties is not None: for k, v in six.iteritems(properties): value = v['value'] key = prefix + '_' + k if 'name' in v: key += '_' + v['name'] resource_meta[key] = value return resource_meta def _get_int_sample(key, statistic, resource_id, resource_meta): if key not in statistic: return None return int(statistic[key]), resource_id, resource_meta class OpenDayLightDriver(driver.Driver): """Driver collecting network information from OpenDaylight. This driver reads its resources from "pipeline.yaml". A resource must satisfy the following conditions: * the resource is a URL * the scheme is "opendaylight" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of the request URL to the OpenDaylight REST API endpoint. (default http) * auth: HTTP auth strategy; can be set to basic or digest. (default None) * user: The username used by auth. (default None) * password: The password used by auth. (default None) * container_name: Name of the OpenDaylight container. (default "default") This parameter allows multiple values.
e.g.:: opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default& container_name=egg&auth=basic&user=admin&password=admin&scheme=http In this case, the driver send request to below URLs: http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow http://127.0.0.1:8080/controller/nb/v2/statistics/egg/flow """ @staticmethod def _prepare_cache(endpoint, params, cache): if 'network.statistics.opendaylight' in cache: return cache['network.statistics.opendaylight'] data = {} container_names = params.get('container_name', ['default']) odl_params = {} if 'auth' in params: odl_params['auth'] = params['auth'][0] if 'user' in params: odl_params['user'] = params['user'][0] if 'password' in params: odl_params['password'] = params['password'][0] cs = client.Client(endpoint, odl_params) for container_name in container_names: try: container_data = {} # get flow statistics container_data['flow'] = cs.statistics.get_flow_statistics( container_name) # get port statistics container_data['port'] = cs.statistics.get_port_statistics( container_name) # get table statistics container_data['table'] = cs.statistics.get_table_statistics( container_name) # get topology container_data['topology'] = cs.topology.get_topology( container_name) # get switch information container_data['switch'] = cs.switch_manager.get_nodes( container_name) # get and optimize user links # e.g. # before: # "OF|2@OF|00:00:00:00:00:00:00:02" # after: # { # 'port': { # 'type': 'OF', # 'id': '2'}, # 'node': { # 'type': 'OF', # 'id': '00:00:00:00:00:00:00:02' # } # } user_links_raw = cs.topology.get_user_links(container_name) user_links = [] container_data['user_links'] = user_links for user_link_row in user_links_raw['userLinks']: user_link = {} for k, v in six.iteritems(user_link_row): if (k == "dstNodeConnector" or k == "srcNodeConnector"): port_raw, node_raw = v.split('@') port = {} port['type'], port['id'] = port_raw.split('|') node = {} node['type'], node['id'] = node_raw.split('|') v = {'port': port, 'node': node} user_link[k] = v user_links.append(user_link) # get link status to hosts container_data['active_hosts'] = ( cs.host_tracker.get_active_hosts(container_name)) container_data['inactive_hosts'] = ( cs.host_tracker.get_inactive_hosts(container_name)) container_data['timestamp'] = timeutils.utcnow().isoformat() data[container_name] = container_data except Exception: LOG.exception(_('Request failed to connect to OpenDaylight' ' with NorthBound REST API')) cache['network.statistics.opendaylight'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): extractor = self._get_extractor(meter_name) if extractor is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. return None iter = self._get_iter(meter_name) if iter is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. 
return None parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) data = self._prepare_cache(endpoint, params, cache) samples = [] for name, value in six.iteritems(data): timestamp = value['timestamp'] for sample in iter(extractor, value): if sample is not None: # set controller name and container name # to resource_metadata sample[2]['controller'] = 'OpenDaylight' sample[2]['container'] = name samples.append(sample + (timestamp, )) return samples def _get_iter(self, meter_name): if meter_name == 'switch': return self._iter_switch elif meter_name.startswith('switch.flow'): return self._iter_flow elif meter_name.startswith('switch.table'): return self._iter_table elif meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _iter_switch(extractor, data): for switch in data['switch']['nodeProperties']: yield extractor(switch, switch['node']['id'], {}) @staticmethod def _switch(statistic, resource_id, resource_meta): resource_meta.update(_get_properties(statistic.get('properties'))) return 1, resource_id, resource_meta @staticmethod def _iter_port(extractor, data): for port_statistic in data['port']['portStatistics']: for statistic in port_statistic['portStatistic']: resource_meta = {'port': statistic['nodeConnector']['id']} yield extractor(statistic, port_statistic['node']['id'], resource_meta, data) @staticmethod def _switch_port(statistic, resource_id, resource_meta, data): my_node_id = resource_id my_port_id = statistic['nodeConnector']['id'] # link status from topology edge_properties = data['topology']['edgeProperties'] for edge_property in edge_properties: edge = edge_property['edge'] if (edge['headNodeConnector']['node']['id'] == my_node_id and edge['headNodeConnector']['id'] == my_port_id): target_node = edge['tailNodeConnector'] elif (edge['tailNodeConnector']['node']['id'] == my_node_id and edge['tailNodeConnector']['id'] == my_port_id): target_node = edge['headNodeConnector'] else: continue resource_meta['topology_node_id'] = target_node['node']['id'] resource_meta['topology_node_port'] = target_node['id'] resource_meta.update(_get_properties( edge_property.get('properties'), prefix='topology')) break # link status from user links for user_link in data['user_links']: if (user_link['dstNodeConnector']['node']['id'] == my_node_id and user_link['dstNodeConnector']['port']['id'] == my_port_id): target_node = user_link['srcNodeConnector'] elif (user_link['srcNodeConnector']['node']['id'] == my_node_id and user_link['srcNodeConnector']['port']['id'] == my_port_id): target_node = user_link['dstNodeConnector'] else: continue resource_meta['user_link_node_id'] = target_node['node']['id'] resource_meta['user_link_node_port'] = target_node['port']['id'] resource_meta['user_link_status'] = user_link['status'] resource_meta['user_link_name'] = user_link['name'] break # link status to hosts for hosts, status in moves.zip( [data['active_hosts'], data['inactive_hosts']], ['active', 'inactive']): for host_config in hosts['hostConfig']: if (host_config['nodeId'] != my_node_id or host_config['nodeConnectorId'] != my_port_id): continue resource_meta['host_status'] = status for key in ['dataLayerAddress', 'vlan', 'staticHost', 'networkAddress']: if key in host_config: resource_meta['host_' + key] = host_config[key] break return 1, resource_id, resource_meta 
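# A sketch of the extractor convention used throughout this driver
# (values invented for illustration): every extractor returns a
# (volume, resource_id, resource_metadata) triple, e.g.
# _get_int_sample('receiveBytes', statistic, resource_id, resource_meta)
# might return (1234, '00:00:00:00:00:00:00:02', {'port': '2'});
# get_sample_data() then appends the cached timestamp to yield the
# (volume, resource_id, resource_metadata, timestamp) tuple it emits.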
@staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('receivePackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitPackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_frame_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveFrameError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_overrun_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveOverRunError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_crc_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveCrcError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_collision_count(statistic, resource_id, resource_meta, data): return _get_int_sample('collisionCount', statistic, resource_id, resource_meta) @staticmethod def _iter_table(extractor, data): for table_statistic in data['table']['tableStatistics']: for statistic in table_statistic['tableStatistic']: resource_meta = {'table_id': statistic['nodeTable']['id']} yield extractor(statistic, table_statistic['node']['id'], resource_meta) @staticmethod def _switch_table(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def _switch_table_active_entries(statistic, resource_id, resource_meta): return _get_int_sample('activeCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_lookup_packets(statistic, resource_id, resource_meta): return _get_int_sample('lookupCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_matched_packets(statistic, resource_id, resource_meta): return _get_int_sample('matchedCount', statistic, resource_id, resource_meta) @staticmethod def _iter_flow(extractor, data): for flow_statistic in data['flow']['flowStatistics']: for statistic in flow_statistic['flowStatistic']: resource_meta = {'flow_id': statistic['flow']['id'], 'table_id': statistic['tableId']} for key, value in utils.dict_to_keyval(statistic['flow'], 'flow'): resource_meta[key.replace('.', '_')] = value yield extractor(statistic, flow_statistic['node']['id'], resource_meta) @staticmethod def _switch_flow(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def 
_switch_flow_duration_seconds(statistic, resource_id, resource_meta): return _get_int_sample('durationSeconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_duration_nanoseconds(statistic, resource_id, resource_meta): return _get_int_sample('durationNanoseconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_packets(statistic, resource_id, resource_meta): return _get_int_sample('packetCount', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_bytes(statistic, resource_id, resource_meta): return _get_int_sample('byteCount', statistic, resource_id, resource_meta) ceilometer-6.0.0/ceilometer/network/statistics/opendaylight/client.py0000664000567000056710000001443512701406224027345 0ustar jenkinsjenkins00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import requests from requests import auth import six from ceilometer.i18n import _ CONF = cfg.CONF CONF.import_opt('http_timeout', 'ceilometer.service') LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(object): """Base class of OpenDaylight REST APIs Clients.""" @abc.abstractproperty def base_url(self): """Returns base url for each REST API.""" def __init__(self, client): self.client = client def request(self, path, container_name): return self.client.request(self.base_url + path, container_name) class OpenDaylightRESTAPIFailed(Exception): pass class StatisticsAPIClient(_Base): """OpenDaylight Statistics REST API Client Base URL: {endpoint}/statistics/{containerName} """ base_url = '/statistics/%(container_name)s' def get_port_statistics(self, container_name): """Get port statistics URL: {Base URL}/port """ return self.request('/port', container_name) def get_flow_statistics(self, container_name): """Get flow statistics URL: {Base URL}/flow """ return self.request('/flow', container_name) def get_table_statistics(self, container_name): """Get table statistics URL: {Base URL}/table """ return self.request('/table', container_name) class TopologyAPIClient(_Base): """OpenDaylight Topology REST API Client Base URL: {endpoint}/topology/{containerName} """ base_url = '/topology/%(container_name)s' def get_topology(self, container_name): """Get topology URL: {Base URL} """ return self.request('', container_name) def get_user_links(self, container_name): """Get user links URL: {Base URL}/userLinks """ return self.request('/userLinks', container_name) class SwitchManagerAPIClient(_Base): """OpenDaylight Switch Manager REST API Client Base URL: {endpoint}/switchmanager/{containerName} """ base_url = '/switchmanager/%(container_name)s' def get_nodes(self, container_name): """Get node information URL: {Base URL}/nodes """ return self.request('/nodes', container_name) class HostTrackerAPIClient(_Base): """OpenDaylight Host Tracker REST API Client Base URL: {endpoint}/hosttracker/{containerName} """ base_url = '/hosttracker/%(container_name)s' def 
get_active_hosts(self, container_name): """Get active hosts information URL: {Base URL}/hosts/active """ return self.request('/hosts/active', container_name) def get_inactive_hosts(self, container_name): """Get inactive hosts information URL: {Base URL}/hosts/inactive """ return self.request('/hosts/inactive', container_name) class Client(object): def __init__(self, endpoint, params): self.statistics = StatisticsAPIClient(self) self.topology = TopologyAPIClient(self) self.switch_manager = SwitchManagerAPIClient(self) self.host_tracker = HostTrackerAPIClient(self) self._endpoint = endpoint self._req_params = self._get_req_params(params) @staticmethod def _get_req_params(params): req_params = { 'headers': { 'Accept': 'application/json' }, 'timeout': CONF.http_timeout, } auth_way = params.get('auth') if auth_way in ['basic', 'digest']: user = params.get('user') password = params.get('password') if auth_way == 'basic': auth_class = auth.HTTPBasicAuth else: auth_class = auth.HTTPDigestAuth req_params['auth'] = auth_class(user, password) return req_params def _log_req(self, url): curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)] if 'auth' in self._req_params: auth_class = self._req_params['auth'] if isinstance(auth_class, auth.HTTPBasicAuth): curl_command.append('--basic ') else: curl_command.append('--digest ') curl_command.append('--user "%s":"%s" ' % (auth_class.username, auth_class.password)) for name, value in six.iteritems(self._req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) @staticmethod def _log_res(resp): dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) def _http_request(self, url): if CONF.debug: self._log_req(url) resp = requests.get(url, **self._req_params) if CONF.debug: self._log_res(resp) if resp.status_code // 100 != 2: raise OpenDaylightRESTAPIFailed( _('OpenDaylitght API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp.json() def request(self, path, container_name): url = self._endpoint + path % {'container_name': container_name} return self._http_request(url) ceilometer-6.0.0/ceilometer/i18n.py0000664000567000056710000000252612701406223020271 0ustar jenkinsjenkins00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html """ import oslo_i18n DOMAIN = 'ceilometer' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level.
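# For example (illustrative only; these exact calls do not appear in
# this module):
# LOG.info(_LI('Coordination backend started successfully.'))
# LOG.error(_LE('Failed to record metering data: %s'), err)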
_LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ceilometer-6.0.0/ceilometer/locale/0000775000567000056710000000000012701406364020400 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ru/0000775000567000056710000000000012701406364021026 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ru/LC_MESSAGES/0000775000567000056710000000000012701406364022613 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po0000664000567000056710000004742612701406223025312 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev34\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-09 20:26+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-10 08:01+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s не найден" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "ĐрифметичеŃкий преобразователь должен иŃпользовать хотя бы один Ńчетчик в " "выражении %s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Не ŃдалоŃŃŚ Ńоздать Ń‚Đ°Đ±Đ»Đ¸Ń†Ń %(table_name)s: Ńже ŃŃщеŃтвŃет. Đгнорирование " "ĐľŃибки" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Продолжить поŃле ĐľŃибки Ń %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "Не ŃдалоŃŃŚ подключитьŃŃŹ Đş ĐżĐľĐ´Ń‡Đ¸Đ˝ĐµĐ˝Đ˝ĐľĐĽŃ Ń…ĐľŃŃ‚Ń: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Не ŃдалоŃŃŚ подключитьŃŃŹ Đş XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "" "Не ŃдалоŃŃŚ полŃчить информацию об иŃпользовании процеŃŃора для %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "" "Не ŃдалоŃŃŚ полŃчить информацию об иŃпользовании памяти для %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "" "Не ŃдалоŃŃŚ полŃчить информацию об иŃпользовании CPU для виртŃальной маŃины %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Не ŃдалоŃŃŚ полŃчить IP-Đ°Đ´Ń€ĐµŃ ŃŤĐşĐ·ĐµĐĽĐżĐ»ŃŹŃ€Đ° %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "Целевой объект диŃпетчера не задан, Ń„Ńнкция измерения не бŃдет опŃбликована. 
" "Укажите целевой объект в файле ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Удаление Ńведомления %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Возникла ĐľŃибка в libvirt при поиŃке экземпляра <имя=%(name)s, ĐĐ”=%(id)s>: " "[Код ĐľŃибки: %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "ĐžŃибка анализа ответа HTTP: %s" msgid "Error stopping pollster." msgstr "ĐžŃибка ĐľŃтановки опраŃивающего объекта." msgid "Event" msgstr "Событие" msgid "Expression evaluated to a NaN value!" msgstr "РезŃльтат вычиŃления выражения - значение NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Не ŃдалоŃŃŚ импортировать раŃŃирение для %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Не ŃдалоŃŃŚ проверить данные экземпляра <имя=%(name)s, ĐĐ”=%(id)s>, ŃĐľŃтояние " "домена - SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Не ŃдалоŃŃŚ проверить иŃпользование памяти экземпляром %(instance_uuid)s, не " "ŃдалоŃŃŚ полŃчить информацию от libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Не ŃдалоŃŃŚ проверить иŃпользование памяти экземпляром <имя=%(name)s, ĐĐ”=" "%(id)s>, не ŃдалоŃŃŚ полŃчить информацию от libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Не ŃдалоŃŃŚ загрŃзить обработчики Ńведомлений для %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "ĐžŃибка анализа значения времени %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Не ŃдалоŃŃŚ опŃбликовать %d точек данных, выполняетŃŃŹ их Ńдаление" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Не ŃдалоŃŃŚ опŃбликовать %d точек данных, Ńоздайте для них очередь" #, python-format msgid "Failed to record metering data: %s" msgstr "Не ŃдалоŃŃŚ запиŃать данные измерений: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "НедопŃŃтимое выражение фильтра: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Đгнорирование экземпляра %(name)s (%(instance_id)s) : %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Đгнорирование экземпляра %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "БаланŃировщик нагрŃзки %(loadbalancer_id)s игнорирŃетŃŃŹ" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "ĐźŃĐ» %(pool_id)s игнорирŃетŃŃŹ" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "НедопŃŃтимый ŃинтакŃĐ¸Ń YAML в файле определений %(file)s; Ńтрока: %(line)s, " "Ńтолбец: %(column)s." 
#, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "НедопŃŃтимый интервал %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "НедопŃŃтимый тип ĐľŃобенноŃти %(type)s для ĐľŃобенноŃти %(trait)s" msgid "Limit must be positive" msgstr "Ограничение должно быть положительным" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "ĐĐ· драйвера хранилища возвращено неŃколько Ńобытий Ń ĐĐ” %s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Найдено неŃколько виртŃальных маŃин %s в XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "Необходимо Ńказать connection_url и connection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Нет Đ´ĐľŃŃ‚Ńпного модŃля %(plugin)s для %(name)s" msgid "Node Manager init failed" msgstr "Сбой инициализации админиŃтратора Ńзлов" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Нет прав Đ´ĐľŃŃ‚Ńпа Đş %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "ФŃнкция API OpenDaylight вернŃла %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "API Opencontrail возвратил %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Оператор %(operator)s не поддерживаетŃŃŹ. Для поля %(field)s возможен только " "оператор равенŃтва" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Оператор %(operator)s не поддерживаетŃŃŹ. Поддерживаемые операторы: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "НедопŃŃтимое выражение Ńортировки: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "ĐžŃибка анализа Ńпецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" msgid "Period must be positive." msgstr "Период должен быть положительным." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Конвейер %(pipeline)s: %(status)s поŃле ĐľŃибки от ĐżŃбликатора %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Конвейер %(pipeline)s: Продолжение поŃле ĐľŃибки из ĐżŃбликатора %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "Конвейер %(pipeline)s: ĐžŃибка выгрŃзки преобразователя %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Конвейер %(pipeline)s: Выход поŃле ĐľŃибки из преобразователя %(trans)s для " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Указан модŃль, но не передано имя модŃля для %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "%(cnt)s-кратный Ńбой датчика опроŃа %(mtr)s!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "ĐžĐżŃ€ĐľŃ %(name)s не ŃдалоŃŃŚ выполнить %(cnt)s раз." #, python-format msgid "Pollster for %s is disabled!" msgstr "ОпраŃивающий объект для %s выключен!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" 
msgstr "" "Сделайте так, чтобы опраŃивающий объект %(name)s больŃе не опраŃивал " "иŃточник %(source)s!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "ПревыŃена макŃимальная длина local_queue ĐżŃбликатора, Ńдаление %d Ńамых " "Ńтарых образцов" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Стратегия ĐżŃбликации неизвеŃтна (%s). По Ńмолчанию принŃдительная" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "ФŃнкция API RGW AdminOps вернŃла %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "Сбой запроŃа на подключение Đş OpenDaylight Ń API REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "Не Ńказано обязательное поле %s" msgid "Resource" msgstr "РеŃŃŃ€Ń" msgid "Sample" msgstr "Образец" msgid "Samples should be included in request body" msgstr "Образцы должны включатьŃŃŹ в тело запроŃа" #, python-format msgid "Skip loading extension for %s" msgstr "ПропŃŃтить загрŃĐ·ĐşŃ Ń€Đ°ŃŃирения для %s" #, python-format msgid "String %s is not a valid isotime" msgstr "Строка %s не являетŃŃŹ допŃŃтимым значением isotime" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Файл Yaml, определяющий Ńвязи ĐĽĐµĐ¶Đ´Ń ĐľĐ±Ń€Đ°Đ·Ń†Đ°ĐĽĐ¸ и реŃŃŃ€Ńами gnocchi " "(показателями)" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Тип данных %(type)s не поддерживаетŃŃŹ. СпиŃок поддерживаемых типов данных: " "%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Поле 'fields' являетŃŃŹ обязательным для %s" msgid "The path for the file publisher is required" msgstr "ТребŃетŃŃŹ ĐżŃть для ĐżŃбликатора файлов" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: не ŃдаетŃŃŹ декодировать данные, отправленные %s" msgid "UDP: Unable to store meter" msgstr "UDP: не ŃдалоŃŃŚ Ńохранить Ńчетчик" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Не ŃдалоŃŃŚ подключитьŃŃŹ Đş ŃĐµŃ€Đ˛ĐµŃ€Ń Đ±Đ°Đ·Ń‹ данных: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Преобразовать значение %(value)s в ожидаемый тип данных %(type)s невозможно." #, python-format msgid "Unable to discover resources: %s" msgstr "Не ŃдалоŃŃŚ найти реŃŃŃ€ŃŃ‹: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "ВычиŃлить выражение %(expr)s невозможно: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Не ŃдалоŃŃŚ загрŃзить ĐżŃбликатор %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Не ŃдалоŃŃŚ загрŃзить инŃпектор гипервизора: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Не ŃдалоŃŃŚ повторно подключитьŃŃŹ Đş ĐľŃновной базе данных mongodb поŃле " "%(retries)d попыток. ДальнейŃие попытки прекращены." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Не ŃдалоŃŃŚ повторно подключитьŃŃŹ Đş ĐľŃновной mongodb: %(errmsg)s. Повторное " "подключение через %(retry_interval)d ŃекŃнд." 
msgid "Unable to send sample over UDP" msgstr "Не ŃдалоŃŃŚ отправить образец по UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Возникла непредвиденная иŃключительная ŃитŃация при преобразовании %(value)s " "в ожидаемый тип данных %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "НеизвеŃтное раŃŃирение поиŃка: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "НеизвеŃтный тип метаданных. Ключ (%s) нельзя бŃдет запраŃивать." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Đ’ баланŃировщике нагрŃзки %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, " "пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Đ’ fw %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Đ’ обработчике %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример " "пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Đ’ ŃчаŃтнике %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример " "пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Đ’ ĐżŃле %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Đ’ vip %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Đ’ VPN %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример пропŃŃкаетŃŃŹ" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "ВиртŃальная маŃина %s не найдена в VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Не найдена виртŃальная маŃина %s в XenServer" msgid "Wrong sensor type" msgstr "Неверный тип датчика" msgid "XenAPI not installed" msgstr "XenAPI не ŃŃтановлен" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "ĐžŃибка YAML при чтении файла определений %(file)s" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "не ŃдалоŃŃŚ полŃчить процеŃŃорное время для %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "Параметр direct не может быть равен true, еŃли включен Gnocchi." #, python-format msgid "dropping out of time order sample: %s" msgstr "Ńдаление образца, выпадающего из хронологичеŃкого порядка: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "Ńдаление образца без предŃеŃтвенника: %s" msgid "ipmitool output length mismatch" msgstr "неŃоответŃтвие длины вывода ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes и backup_count должны быть чиŃлами." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "недопŃŃтимая подпиŃŃŚ Ńообщения, Ńдаление Ńообщения: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Ńбой анализа данных датчика IPMI, не полŃчены данные из переданного ввода" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Ńбой анализа данных датчика IPMI, неизвеŃтный тип датчика" msgid "running ipmitool failure" msgstr "Ńбой выполнения ipmitool" ceilometer-6.0.0/ceilometer/locale/ceilometer.pot0000664000567000056710000003440212701406224023252 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev9\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-29 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: ceilometer/collector.py:134 #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "" #: ceilometer/collector.py:141 msgid "UDP: Unable to store meter" msgstr "" #: ceilometer/declarative.py:44 #, python-format msgid "The field 'fields' is required for %s" msgstr "" #: ceilometer/declarative.py:57 #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "" #: ceilometer/declarative.py:66 #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "" #: ceilometer/declarative.py:92 #, python-format msgid "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" #: ceilometer/declarative.py:157 #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, " "column: %(column)s." msgstr "" #: ceilometer/declarative.py:163 #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "" #: ceilometer/notification.py:206 #, python-format msgid "Failed to load any notification handlers for %s" msgstr "" #: ceilometer/pipeline.py:390 #, python-format msgid "Unable to load publisher %s" msgstr "" #: ceilometer/pipeline.py:428 #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" #: ceilometer/pipeline.py:458 #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" #: ceilometer/pipeline.py:495 #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "" #: ceilometer/pipeline.py:511 #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" #: ceilometer/agent/manager.py:207 #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" 
msgstr "" #: ceilometer/agent/manager.py:213 #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "" #: ceilometer/agent/manager.py:289 #, python-format msgid "Skip loading extension for %s" msgstr "" #: ceilometer/agent/manager.py:292 #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "" #: ceilometer/agent/manager.py:470 #, python-format msgid "Unable to discover resources: %s" msgstr "" #: ceilometer/agent/manager.py:472 #, python-format msgid "Unknown discovery extension: %s" msgstr "" #: ceilometer/agent/manager.py:481 msgid "Error stopping pollster." msgstr "" #: ceilometer/api/middleware.py:102 #, python-format msgid "Error parsing HTTP response: %s" msgstr "" #: ceilometer/api/controllers/v2/base.py:50 #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "" #: ceilometer/api/controllers/v2/base.py:59 #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "" #: ceilometer/api/controllers/v2/base.py:220 #, python-format msgid "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" #: ceilometer/api/controllers/v2/base.py:225 #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is:" " %(supported)s" msgstr "" #: ceilometer/api/controllers/v2/base.py:230 #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type " "%(type)s." msgstr "" #: ceilometer/api/controllers/v2/events.py:190 #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" #: ceilometer/api/controllers/v2/events.py:196 #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is " "available for field %(field)s" msgstr "" #: ceilometer/api/controllers/v2/events.py:303 msgid "Event" msgstr "" #: ceilometer/api/controllers/v2/events.py:306 #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "" #: ceilometer/api/controllers/v2/meters.py:308 msgid "Samples should be included in request body" msgstr "" #: ceilometer/api/controllers/v2/meters.py:394 msgid "Period must be positive." msgstr "" #: ceilometer/api/controllers/v2/meters.py:423 #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "" #: ceilometer/api/controllers/v2/query.py:203 #, python-format msgid "Filter expression not valid: %s" msgstr "" #: ceilometer/api/controllers/v2/query.py:218 #, python-format msgid "Order-by expression not valid: %s" msgstr "" #: ceilometer/api/controllers/v2/query.py:316 #, python-format msgid "String %s is not a valid isotime" msgstr "" #: ceilometer/api/controllers/v2/query.py:317 #, python-format msgid "Failed to parse the timestamp value %s" msgstr "" #: ceilometer/api/controllers/v2/resources.py:128 msgid "Resource" msgstr "" #: ceilometer/api/controllers/v2/root.py:66 msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" #: ceilometer/api/controllers/v2/root.py:169 msgid "direct option cannot be true when Gnocchi is enabled." 
msgstr "" #: ceilometer/api/controllers/v2/samples.py:144 msgid "Sample" msgstr "" #: ceilometer/api/controllers/v2/utils.py:50 msgid "Limit must be positive" msgstr "" #: ceilometer/compute/pollsters/cpu.py:61 #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "" #: ceilometer/compute/pollsters/cpu.py:92 #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "" #: ceilometer/compute/pollsters/disk.py:174 #: ceilometer/compute/pollsters/disk.py:312 #: ceilometer/compute/pollsters/disk.py:447 #: ceilometer/compute/pollsters/disk.py:517 #: ceilometer/compute/pollsters/net.py:110 #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "" #: ceilometer/compute/pollsters/disk.py:645 #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "" #: ceilometer/compute/pollsters/memory.py:65 #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "" #: ceilometer/compute/virt/inspector.py:314 #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "" #: ceilometer/compute/virt/libvirt/inspector.py:103 #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" #: ceilometer/compute/virt/libvirt/inspector.py:123 #, python-format msgid "" "Failed to inspect data of instance , domain " "state is SHUTOFF." msgstr "" #: ceilometer/compute/virt/libvirt/inspector.py:193 #, python-format msgid "" "Failed to inspect memory usage of instance , " "can not get info from libvirt." msgstr "" #: ceilometer/compute/virt/libvirt/inspector.py:201 #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info " "from libvirt: %(error)s" msgstr "" #: ceilometer/compute/virt/vmware/inspector.py:106 #: ceilometer/compute/virt/vmware/inspector.py:123 #: ceilometer/compute/virt/vmware/inspector.py:155 #: ceilometer/compute/virt/vmware/inspector.py:168 #, python-format msgid "VM %s not found in VMware vSphere" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:66 msgid "XenAPI not installed" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:72 msgid "Must specify connection_url, and connection_password to use" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:87 #, python-format msgid "Could not connect slave host: %s " msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:90 #, python-format msgid "Could not connect to XenAPI: %s" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:113 #, python-format msgid "VM %s not found in XenServer" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:116 #, python-format msgid "Multiple VM %s found in XenServer" msgstr "" #: ceilometer/compute/virt/xenapi/inspector.py:129 #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "" #: ceilometer/dispatcher/gnocchi.py:59 msgid "" "The Yaml file that defines mapping between samples and gnocchi " "resources/metrics" msgstr "" #: ceilometer/dispatcher/http.py:76 msgid "" "Dispatcher target was not set, no meter will be posted. 
Set the target in" " the ceilometer.conf file" msgstr "" #: ceilometer/dispatcher/http.py:104 #, python-format msgid "Failed to record metering data: %s" msgstr "" #: ceilometer/dispatcher/http.py:107 #, python-format msgid "message signature invalid, discarding message: %r" msgstr "" #: ceilometer/event/converter.py:62 #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "" #: ceilometer/event/converter.py:106 #, python-format msgid "Required field %s not specified" msgstr "" #: ceilometer/event/converter.py:283 #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "" #: ceilometer/hardware/discovery.py:95 #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "" #: ceilometer/ipmi/platform/intel_node_manager.py:300 msgid "Node Manager init failed" msgstr "" #: ceilometer/ipmi/platform/ipmi_sensor.py:101 msgid "Wrong sensor type" msgstr "" #: ceilometer/ipmi/platform/ipmitool.py:39 msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" #: ceilometer/ipmi/platform/ipmitool.py:80 msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" #: ceilometer/ipmi/platform/ipmitool.py:102 msgid "ipmitool output length mismatch" msgstr "" #: ceilometer/ipmi/platform/ipmitool.py:128 msgid "running ipmitool failure" msgstr "" #: ceilometer/ipmi/pollsters/node.py:66 #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "" #: ceilometer/ipmi/pollsters/node.py:71 ceilometer/ipmi/pollsters/sensor.py:74 #, python-format msgid "Pollster for %s is disabled!" msgstr "" #: ceilometer/ipmi/pollsters/sensor.py:68 #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "" #: ceilometer/network/services/fwaas.py:48 #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" #: ceilometer/network/services/lbaas.py:88 #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" #: ceilometer/network/services/lbaas.py:136 #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" #: ceilometer/network/services/lbaas.py:177 #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" #: ceilometer/network/services/lbaas.py:297 #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "" #: ceilometer/network/services/lbaas.py:307 #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "" #: ceilometer/network/services/lbaas.py:412 #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" #: ceilometer/network/services/lbaas.py:456 #, python-format msgid "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" #: ceilometer/network/services/vpnaas.py:49 #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" #: ceilometer/network/statistics/opencontrail/client.py:58 #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "" #: ceilometer/network/statistics/opendaylight/client.py:232 #: ceilometer/tests/unit/network/statistics/opendaylight/test_client.py:146 #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "" #: ceilometer/network/statistics/opendaylight/driver.py:169 msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" #: ceilometer/objectstore/rgw_client.py:48 #, python-format msgid "RGW 
AdminOps API returned %(status)s %(reason)s" msgstr "" #: ceilometer/publisher/file.py:62 msgid "The path for the file publisher is required" msgstr "" #: ceilometer/publisher/file.py:76 msgid "max_bytes and backup_count should be numbers." msgstr "" #: ceilometer/publisher/messaging.py:95 #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" #: ceilometer/publisher/messaging.py:147 #, python-format msgid "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" #: ceilometer/publisher/messaging.py:159 #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" #: ceilometer/publisher/messaging.py:163 #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "" #: ceilometer/publisher/udp.py:67 msgid "Unable to send sample over UDP" msgstr "" #: ceilometer/storage/impl_sqlalchemy.py:314 #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "" #: ceilometer/storage/hbase/utils.py:474 #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" #: ceilometer/storage/mongo/utils.py:270 #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "" #: ceilometer/storage/mongo/utils.py:412 #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. " "Giving up." msgstr "" #: ceilometer/storage/mongo/utils.py:416 #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" #: ceilometer/transformer/arithmetic.py:57 #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" #: ceilometer/transformer/arithmetic.py:79 msgid "Expression evaluated to a NaN value!" msgstr "" #: ceilometer/transformer/arithmetic.py:95 #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "" #: ceilometer/transformer/conversions.py:198 #, python-format msgid "dropping out of time order sample: %s" msgstr "" #: ceilometer/transformer/conversions.py:216 #, python-format msgid "dropping sample with no predecessor: %s" msgstr "" ceilometer-6.0.0/ceilometer/locale/ceilometer-log-info.pot0000664000567000056710000000770512701406223024767 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b3.dev49\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-02-08 06:51+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: ceilometer/coordination.py:87 msgid "Coordination backend started successfully." msgstr "" #: ceilometer/coordination.py:136 #, python-format msgid "Joined partitioning group %s" msgstr "" #: ceilometer/coordination.py:157 #, python-format msgid "Left partitioning group %s" msgstr "" #: ceilometer/declarative.py:165 #, python-format msgid "Definitions: %s" msgstr "" #: ceilometer/notification.py:306 msgid "Reloading notification agent and listeners." 
msgstr "" #: ceilometer/pipeline.py:409 #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter" " %(param)s" msgstr "" #: ceilometer/pipeline.py:705 ceilometer/pipeline.py:767 msgid "detected decoupled pipeline config format" msgstr "" #: ceilometer/pipeline.py:791 ceilometer/pipeline.py:810 #, python-format msgid "Pipeline config: %s" msgstr "" #: ceilometer/service_base.py:86 msgid "Pipeline configuration file has been updated." msgstr "" #: ceilometer/service_base.py:92 msgid "Detected change in pipeline configuration." msgstr "" #: ceilometer/agent/manager.py:178 #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "" #: ceilometer/agent/manager.py:183 #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "" #: ceilometer/agent/manager.py:487 msgid "Reconfiguring polling tasks." msgstr "" #: ceilometer/api/app.py:109 #, python-format msgid "Starting server in PID %s" msgstr "" #: ceilometer/api/app.py:110 msgid "Configuration:" msgstr "" #: ceilometer/api/app.py:114 #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "" #: ceilometer/api/app.py:118 #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "" #: ceilometer/api/controllers/v2/utils.py:47 #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "" #: ceilometer/cmd/storage.py:44 msgid "Nothing to clean, database metering time to live is disabled" msgstr "" #: ceilometer/cmd/storage.py:53 msgid "Nothing to clean, database event time to live is disabled" msgstr "" #: ceilometer/event/storage/impl_elasticsearch.py:107 #: ceilometer/event/storage/impl_sqlalchemy.py:201 #: ceilometer/event/storage/pymongo_base.py:67 #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "" #: ceilometer/event/storage/impl_log.py:33 #, python-format msgid "Dropping event data with TTL %d" msgstr "" #: ceilometer/event/storage/impl_sqlalchemy.py:439 #, python-format msgid "%d events are removed from database" msgstr "" #: ceilometer/publisher/messaging.py:93 #, python-format msgid "Publishing policy set to %s" msgstr "" #: ceilometer/storage/impl_log.py:41 #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "" #: ceilometer/storage/impl_log.py:53 #, python-format msgid "Dropping metering data with TTL %d" msgstr "" #: ceilometer/storage/impl_sqlalchemy.py:368 #, python-format msgid "%d samples removed from database" msgstr "" #: ceilometer/storage/impl_sqlalchemy.py:404 msgid "Expired residual resource and meter definition data" msgstr "" #: ceilometer/storage/mongo/utils.py:259 #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "" #: ceilometer/storage/mongo/utils.py:462 #, python-format msgid "Index %s will be recreate." msgstr "" ceilometer-6.0.0/ceilometer/locale/zh_CN/0000775000567000056710000000000012701406364021401 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000012701406364023166 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po0000664000567000056710000003605212701406223025656 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. 
# # Translators: # aji.zqfan , 2015 # yelu , 2013 # Tom Fifield , 2013 # 颜海峰 , 2014 # yelu , 2013 # Yu Zhang, 2013 # Yu Zhang, 2013 # 颜海峰 , 2014 # English translations for ceilometer. # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Alex Eng , 2016. #zanata # Daisy , 2016. #zanata # Linda , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev50\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-19 00:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-16 12:12+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 3.7.3\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "无法找ĺ°%(entity)s %(id)s " #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "算术ĺŹĺ˝˘ĺ™¨ĺś¨čˇ¨čľľĺĽŹ'%s'中必须至少使用一个指标" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "表%(table_name)s已经ĺ­ĺś¨ďĽŚć— ćł•ĺ›ĺ»şă€‚忽略此错误继续执行。" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "é‡ĺ°é”™čŻŻ%(name)s:%(error)s,继续执行" #, python-format msgid "Could not connect slave host: %s " msgstr "无法连接伺服主机:%s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "无法连接ĺ°XenAPI:%s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "无法为虚拟机%(id)s获取CPU使用率:%(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "无法为%(id)s获取内ĺ­ä˝żç”¨äżˇćŻďĽš%(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "无法获取虚拟机%sçš„CPU使用率" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "无法为实例%s获取IP地址" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "ĺ†ĺŹ‘ĺ™¨ç›®ć ‡ćśŞč®ľç˝®ďĽŚć˛ˇćś‰ćŚ‡ć ‡ĺ°†č˘«ĺŹ‘é€ă€‚在ceilometer.conf文件中设置目标。" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在丢ĺĽé€šçźĄ%(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查找实例 <ĺŤç§°ä¸ş %(name)s,标识为 %(id)s> 时,libvirt 中出错:[é”™čŻŻä»Łç  " "%(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "č§ŁćžHTTP响应失败: %s" msgid "Error stopping pollster." msgstr "ĺść­˘č˝®čŻ˘ç¨‹ĺşŹć—¶ĺ‡şé”™ă€‚" msgid "Event" msgstr "事件" msgid "Expression evaluated to a NaN value!" msgstr "表达式计算结果为NaNďĽ" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "无法为%(name)s引入扩展:%(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "为虚拟机获取监控数据失败了,虚拟机状ć€ä¸şSHUTOFF" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "为虚拟机%(instance_uuid)s采集内ĺ­ä˝żç”¨ćŚ‡ć ‡ĺ¤±č´Ąäş†ďĽŚć— ćł•ä»Žlibvirt获取信ćŻďĽš" "%(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "为虚拟机采集内ĺ­ä˝żç”¨ćŚ‡ć ‡ĺ¤±č´Ąäş†ďĽŚć— ćł•ä»Žlibvirt获取信" "ćŻă€‚" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "无法为%s加载任何通知处ç†ĺ™¨" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "č§Łćžć—¶é—´ćł%s失败" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "发ĺ¸%d个数据点时失败,正在将其丢ĺĽ" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "发ĺ¸%d个数据点时失败,将其入éź" #, python-format msgid "Failed to record metering data: %s" msgstr "无法保ĺ­ç›‘控数据:%s" #, python-format msgid "Filter expression not valid: %s" msgstr "过滤表达式不ĺ法:%s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "忽略虚拟机%(name)s (%(instance_id)s) : %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "忽略虚拟机%(name)s:%(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "正在忽略负载均衡器 %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "正在忽略池 %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定义文件%(file)s中有非法YAML语法,行:%(line)s,ĺ—%(column)s。" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "非法的间隔%(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特ĺľ%(trait)s包ĺ«äş†ä¸Ťĺ法的特ĺľç±»ĺž‹'%(type)s' " msgid "Limit must be positive" msgstr "limit必须ćŻć­Łć•°" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "从数据库返回了多个id为%s的事件" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "多个虚拟机%s在XenServer中被找ĺ°" msgid "Must specify connection_url, and connection_password to use" msgstr "使用时必须指定connection_urlĺ’Śconnection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "未对 %(name)s ćŹäľ›ĺŤä¸ş %(plugin)s 的插件" msgid "Node Manager init failed" msgstr "节点管ç†ĺ™¨ĺťĺ§‹ĺŚ–ĺ¤±č´Ą" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "ćťé™ä¸Ťč¶łä»Ąč®żé—®%(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylight接口返回状ć€%(status)s,原因%(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail接口返回状ć€%(status)s,原因%(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "čżç®—符 %(operator)s 不受支ćŚă€‚对于字段 %(field)s,只č˝ä˝żç”¨ç­‰ĺŹ·čżç®—符" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "čżç®—符 %(operator)s 不受支ćŚă€‚受支ćŚçš„čżç®—符为:%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "orderby表达式不ĺ法:%s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "对 %(name)s 指定的 JSONPathďĽĺŤłâ€ś%(jsonpath)s”)ĺ­ĺś¨č§Łćžé”™čŻŻďĽš%(err)s" msgid "Period must be positive." 
msgstr "period 参数必须ćŻć­Łć•°" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "ç®ˇé“ %(pipeline)s:在发ĺ¸ç¨‹ĺşŹ %(pub)s 中发生错误之ĺŽďĽŚĺ¤„于 %(status)s 状ć€" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "ćµć°´çşż%(pipeline)s:发ĺ¸ĺ™¨%(pub)s报错,继续执行" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "ćµć°´çşż%(pipeline)s:ĺŹĺ˝˘ĺ™¨%(trans)s清空数据时出错" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "ćµć°´çşż%(pipeline)s:数据%(smp)sçš„ĺŹĺ˝˘ĺ™¨%(trans)sé‡ĺ°é”™čŻŻďĽŚé€€ĺ‡ş" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "指定了插件,但未对 %s ćŹäľ›ćŹ’ä»¶ĺŤ" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "拉取%(mtr)s传感器失败了%(cnt)s次ďĽ" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "轮询 %(name)s 已失败 %(cnt)s 次ďĽ" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s的采集器被ç¦ç”¨" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "请é»ć­˘č˝®čŻ˘ç¨‹ĺşŹ %(name)s ĺ†Ťč˝®čŻ˘ćş %(source)sďĽ" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "发ĺ¸çš„数据量超过本地éźĺ—最大长度,正在丢ĺĽćś€č€çš„%d个数据" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "未知的发ĺ¸ç­–略(%s),强ĺ¶ä˝żç”¨é»č®¤ç­–略" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps接口返回%(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "请求无法连接ĺ°OpenDaylight的北ĺ‘REST接口" #, python-format msgid "Required field %s not specified" msgstr "必填项%s没有填写" msgid "Resource" msgstr "资ćş" msgid "Sample" msgstr "数据" msgid "Samples should be included in request body" msgstr "样本应包括在请求主体中" #, python-format msgid "Skip loading extension for %s" msgstr "跳过为%s加载扩展" #, python-format msgid "String %s is not a valid isotime" msgstr "字符串%s不ćŻä¸Şĺ法的标准时间格式" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "用于定义样本与 gnocchi 资ćş/度量值之间的ć ĺ°„çš„Yaml 文件" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "数据类型%(type)s不被支ćŚă€‚支ćŚçš„数据类型ĺ—表:%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需č¦ĺ­—段“fields”" msgid "The path for the file publisher is required" msgstr "文件发ĺ¸ĺ™¨ĺż…须设置文件路径" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: 无法解ç ç”± %s 发é€çš„数据" msgid "UDP: Unable to store meter" msgstr "UDP: ć— ćł•ĺ­ĺ‚¨č®ˇé‡Źĺ™¨" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "无法连接ĺ°ć•°ćŤ®ĺş“服务器:%(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "无法转换%(value)sĺ°é˘„期的数据类型%(type)s。" #, python-format msgid "Unable to discover resources: %s" msgstr "无法发现资ćşďĽš%s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "无法计算表达式%(expr)s:%(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "无法加载发ĺ¸ĺ™¨%s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "无法加载管ç†ç¨‹ĺşŹçš„探测器:%s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." 
msgstr "在%(retries)d次尝试ĺŽä»Ťć— ćł•重连ĺ°MongoDB主节点。放ĺĽé‡Ťčżžă€‚" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "无法重连ĺ°MongoDB主节点:%(errmsg)s。在%(retry_interval)dç§’ĺŽčż›čˇŚé‡ŤčŻ•ă€‚" msgid "Unable to send sample over UDP" msgstr "无法通过UDP发é€é‡‡ć ·" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "在转换%(value)sĺ°é˘„期的数据类型%(type)s时发生了未预料的异常。" #, python-format msgid "Unknown discovery extension: %s" msgstr "未知的发现器插件:%s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "未知的ĺ…数据类型。键(%s)将无法进行查询。" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在负载均衡器 %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "从fw %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在侦ĺ¬ĺ™¨ %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在ćĺ‘ %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "从pool %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "从vip %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "在 VMware vSphere ä¸­ďĽŚć‰ľä¸Ťĺ° VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "无法在XenServer中找ĺ°č™šć‹źćśş%s" msgid "Wrong sensor type" msgstr "错误的传感器类型" msgid "XenAPI not installed" msgstr "XenAPI没有安装" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "读取定义文件%(file)sć—¶é‡ĺ°YAML错误" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "无法为虚拟机%(id)s获取CPU时间:%(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "ĺŻç”¨ Gnocchi ĺŽďĽŚdirect 选项不č˝ä¸ş true。" #, python-format msgid "dropping out of time order sample: %s" msgstr "正在退出时间顺序样本:%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "因为之前没有数据ďĽç”¨ćťĄč®ˇç®—差值)因而丢ĺĽć•°ćŤ®ďĽš%s" msgid "ipmitool output length mismatch" msgstr "ipmi输出长度不匹配" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytesĺ’Śbackup_count必须ćŻć•´ć•°ă€‚" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "ć¶ćŻç­ľĺŤä¸Ťĺ法,丢ĺĽć¶ćŻďĽš%r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "č§ŁćžIPMI传感器数据失败,从给定的输入中无法检索ĺ°ć•°ćŤ®" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "č§ŁćžIPMI传感器数据失败,未知的传感器类型" msgid "running ipmitool failure" msgstr "čżčˇŚipmitool时失败了" ceilometer-6.0.0/ceilometer/locale/es/0000775000567000056710000000000012701406364021007 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/es/LC_MESSAGES/0000775000567000056710000000000012701406364022574 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/es/LC_MESSAGES/ceilometer.po0000664000567000056710000004066212701406224025267 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. 
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
#
# Translators:
# Rafael Rivero , 2015
# Marian Tort , 2015. #zanata
# OpenStack Infra , 2015. #zanata
# Tom Cocozzello , 2015. #zanata
# Eugènia Torrella , 2016. #zanata
# Tom Cocozzello , 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.0.0rc2.dev7\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-03-23 09:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-24 11:03+0000\n"
"Last-Translator: Eugènia Torrella \n"
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Spanish\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s No encontrado"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr ""
"El transformador aritmético debe utilizar al menos un medidor en la "
"expresión '%s'"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr ""
"No se puede crear la tabla %(table_name)s, ya existe. Se ignorará el error."

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "Continuar después de error desde %(name)s: %(error)s"

#, python-format
msgid "Could not connect slave host: %s "
msgstr "No se ha podido conectar con el host esclavo: %s"

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "No se puede conectar a XenAPI: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "No se ha podido obtener CPU Util para %(id)s: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr "No se ha podido obtener el uso de memoria para %(id)s: %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "No se puede obtener la utilización de CPU de VM %s"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "No se ha podido obtener la dirección IP de la instancia %s"

msgid ""
"Dispatcher target was not set, no meter will be posted. Set the target in "
"the ceilometer.conf file"
msgstr ""
"No se ha establecido el destino de asignador, no se enviará ningún medidor. "
"Establezca el destino en el archivo ceilometer.conf"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance : "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"Error de libvirt al buscar la instancia : [Código "
"de error %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "Error analizándo respuesta HTTP: %s."

msgid "Error stopping pollster."
msgstr "Error al detener el pollster."

msgid "Event"
msgstr "Suceso"

msgid "Expression evaluated to a NaN value!"
msgstr "La expresión se ha evaluado en un valor NaN."

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "No se ha podido importar la extensión para %(name)s: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance , domain state "
"is SHUTOFF."
msgstr ""
"No se han podido analizar los datos de la instancia , el estado del "
"dominio es SHUTOFF."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"No se ha podido analizar el uso de memoria de %(instance_uuid)s, no se puede "
"obtener información de libvirt: %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance , can "
"not get info from libvirt."
msgstr ""
"No se ha podido analizar el uso de memoria de la instancia , no se "
"puede obtener información de libvirt."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "No se ha podido cargar ningún manejador de notificación para %s"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "No se han podido publicar los puntos de datos %d, descartándolos"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola"

#, python-format
msgid "Failed to record metering data: %s"
msgstr "No se ha podido registrar los datos de medición: %s"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "Expresión de filtro no válida: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "Ignorando la instancia %(name)s (%(instance_id)s) : %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "Ignorando la instancia %(name)s: %(error)s"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "Se ignorará el equilibrador de carga %(loadbalancer_id)s"

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "Se ignorará la agrupación %(pool_id)s"

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"Sintaxis de YAML no válida en archivo de definiciones %(file)s en la línea: "
"%(line)s, columna: %(column)s."

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "Periodo no válido %(period)s: %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s"

msgid "Limit must be positive"
msgstr "El límite debe ser positivo"

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr ""
"Se ha devuelto más de un suceso con el %s del controlador de almacenamiento"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "Se han encontrado varias VM %s en XenServer"

msgid "Must specify connection_url, and connection_password to use"
msgstr ""
"Debe especificar el url_conexión y la contraseña_conexión para utilizar"

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s"

msgid "Node Manager init failed"
msgstr "El inicio de Gestor de nodos ha fallado"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "No está autorizado para acceder a %(aspect)s %(id)s"

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "La API OpenDaylitght ha devuelto %(status)s %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only equality operator is available "
"for field %(field)s"
msgstr ""
"El operador %(operator)s no se admite. Solo hay disponible el operador de "
"igualdad para el campo %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"El operador %(operator)s no está admitido. Los operadores admitidos son: "
"%(supported)s"

#, python-format
msgid "Order-by expression not valid: %s"
msgstr "Expresión de ordenar por no válida: %s"

#, python-format
msgid ""
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
msgstr ""
"Error de análisis en especificación de JSONPath '%(jsonpath)s' para "
"%(name)s: %(err)s"

msgid "Period must be positive."
msgstr "El período debe ser positivo."

#, python-format
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
msgstr ""
"Interconexión %(pipeline)s: %(status)s tras el error de la aplicación de "
"publicación %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
msgstr ""
"Interconexión %(pipeline)s: Continúe tras el error de la aplicación de "
"publicación %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
msgstr "Interconexión %(pipeline)s: Error al vaciar el transformador %(trans)s"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
"%(smp)s"
msgstr ""
"Interconexión %(pipeline)s: Salga tras error del transformador %(trans)s "
"para %(smp)s"

#, python-format
msgid "Plugin specified, but no plugin name supplied for %s"
msgstr ""
"Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de "
"plug-in para %s"

#, python-format
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!"

#, python-format
msgid "Polling %(name)s failed for %(cnt)s times!"
msgstr "El sondeo %(name)s ha fallado %(cnt)s veces."

#, python-format
msgid "Pollster for %s is disabled!"
msgstr "¡El Pollster para %s está inhabilitado!"

#, python-format
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
msgstr "¡Impedir pollster %(name)s para el origen de sondeo %(source)s ahora!"

#, python-format
msgid ""
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
msgstr ""
"Se supera la longitud máxima de aplicación de publicación local_queue, "
"descartando los ejemplos más antiguos %d"

#, python-format
msgid "Publishing policy is unknown (%s) force to default"
msgstr ""
"No se conoce la política de publicación (%s) forzar para tomar el valor "
"predeterminado"

#, python-format
msgid "RGW AdminOps API returned %(status)s %(reason)s"
msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s"

msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
msgstr ""
"La solicitud no ha podido conectar con OpenDaylight con la API REST "
"NorthBound"

#, python-format
msgid "Required field %s not specified"
msgstr "Campo necesario %s no especificado"

msgid "Resource"
msgstr "Recurso"

msgid "Sample"
msgstr "Muestra"

msgid "Samples should be included in request body"
msgstr "Los ejemplos se deben incluir en el cuerpo de la solicitud"

#, python-format
msgid "Skip loading extension for %s"
msgstr "Omitir la extensión de carga para %s"

#, python-format
msgid "String %s is not a valid isotime"
msgstr "La serie %s no es una hora iso válida"

msgid ""
"The Yaml file that defines mapping between samples and gnocchi resources/"
"metrics"
msgstr ""
"El archivo Yaml que define la correlación entre los ejemplos y recursos/"
"métricas gnocchi"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"El tipo de datos %(type)s no es compatible. La lista de tipo de datos "
"admitido es: %(supported)s"

#, python-format
msgid "The field 'fields' is required for %s"
msgstr "El campo 'campos' es obligatorio para %s"

msgid "The path for the file publisher is required"
msgstr ""
"La vía de acceso para la aplicación de publicación de archivos es necesaria"

#, python-format
msgid "UDP: Cannot decode data sent by %s"
msgstr "UDP: no se pueden decodificar los datos enviados por %s"

msgid "UDP: Unable to store meter"
msgstr "UDP: no se puede almacenar el medidor"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "No se ha podido conectar con el servidor de base de datos: %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr ""
"No se ha podido convertir el valor %(value)s al tipo de datos esperado "
"%(type)s."

#, python-format
msgid "Unable to discover resources: %s"
msgstr "No se pueden descubrir recursos: %s"

#, python-format
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
msgstr "No se puede evaluar la expresión %(expr)s: %(exc)s"

#, python-format
msgid "Unable to load publisher %s"
msgstr "No se puede cargar la aplicación de publicación %s"

#, python-format
msgid "Unable to load the hypervisor inspector: %s"
msgstr "No se puede cargar el inspector de hipervisor: %s"

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
"up."
msgstr ""
"No se ha podido volver a conectar con la mongodb primaria después de "
"%(retries)d intentos. Se va a abandonar."

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
"%(retry_interval)d seconds."
msgstr ""
"No se ha podido volver a conectar con la mongodb primaria: %(errmsg)s. Se "
"volverá a intentar en %(retry_interval)d segundos."

msgid "Unable to send sample over UDP"
msgstr "No se ha podido enviar una muestra sobre UDP"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"Excepción inesperada al convertir %(value)s al tipo de dato esperado "
"%(type)s."

#, python-format
msgid "Unknown discovery extension: %s"
msgstr "Extensión de descubrimiento desconocida: %s"

#, python-format
msgid "Unknown metadata type. Key (%s) will not be queryable."
msgstr "Tipo de metadatos desconocido. La clave (%s) no se podrá consultar."

#, python-format
msgid ""
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga "
"%(id)s, se omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en la agrupación %(id)s, se "
"omitirá el ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en vip %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
msgstr ""
"Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el "
"ejemplo"

#, python-format
msgid "VM %s not found in VMware vSphere"
msgstr "VM %s no se ha encontrado en VMware vSphere"

#, python-format
msgid "VM %s not found in XenServer"
msgstr "No se han encontrado VM %s en XenServer"

msgid "Wrong sensor type"
msgstr "Tipo de sensor incorrecto"

msgid "XenAPI not installed"
msgstr "XenAPI no está instalado"

#, python-format
msgid "YAML error reading Definitions file %(file)s"
msgstr "Error de YAML al leer el archivo de definiciones %(file)s"

#, python-format
msgid "could not get CPU time for %(id)s: %(e)s"
msgstr "no se ha podido obtener tiempo de CPU para %(id)s: %(e)s"

msgid "direct option cannot be true when Gnocchi is enabled."
msgstr ""
"la opción directo no puede estar definida como true cuando Gnocchi esté "
"habilitado."

#, python-format
msgid "dropping out of time order sample: %s"
msgstr "saliendo del ejemplo de orden de tiempo: %s"

#, python-format
msgid "dropping sample with no predecessor: %s"
msgstr "eliminando la muestra sin predecesor: %s"

msgid "ipmitool output length mismatch"
msgstr "la longitud de salida de ipmitool no coincide"

msgid "max_bytes and backup_count should be numbers."
msgstr "max_bytes y backup_count deben ser números."
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "firma de mensaje no válida, descartando mensaje: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningĂşn " "dato de la entrada" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido" msgid "running ipmitool failure" msgstr "fallo de ejecuciĂłn de ipmitool" ceilometer-6.0.0/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001111612701406223027164 0ustar jenkinsjenkins00000000000000# Eugènia Torrella , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev50\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-19 00:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 11:52+0000\n" "Last-Translator: Eugènia Torrella \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "No se ha podido cargar el inspector %(name)s: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "No se ha podido obtener el uso de memoria residente para %(id)s: %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "El asignador no ha podido manejar el %s, vuelva a ponerlo en la cola." msgid "Error connecting to coordination backend." msgstr "Error de conexiĂłn con el servidor coordinador." msgid "Error getting group membership info from coordination backend." msgstr "" "Error al obtener informaciĂłn de pertenencia a grupos del servidor " "coordinador." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "Error al unirse al grupo de particiĂłn %s, se está reintentando" #, python-format msgid "Error loading meter definition : %(err)s" msgstr "Error al cargar la definiciĂłn de medidor : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "Se ha producido un error al procesar el suceso y se descartará: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "Error al enviar una señal de latido al servidor coordinador." msgid "Fail to process a notification" msgstr "Error al procesar una notificaciĂłn" msgid "Fail to process notification" msgstr "No se ha podido procesar la notificaciĂłn" msgid "Failed to connect to Gnocchi." msgstr "No se ha podido conectar con Gnocchi." #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "No se ha podido conectar con el servicio Kafka: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "" "No se ha podido establecer conexiĂłn con la base de datos con el propĂłsito " "%(purpose)s. Vuelva a intentarlo más tarde: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "" "No se ha podido establecer conexiĂłn con la base de datos con el propĂłsito " "%(purpose)s. 
Vuelva a intentarlo más tarde: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "No se ha podido cargar el recurso debido a un error: %s" #, python-format msgid "Failed to record event: %s" msgstr "No se ha podido registrar el suceso: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "No se han podido registrar los datos de mediciĂłn: %s" msgid "Failed to retry to send sample data with max_retry times" msgstr "" "No se ha podido volver a intentar enviar datos de ejemplo max_retry veces" msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "ID de grupo: %{group_id}s, Miembros: %{members}s, Yo: %{me}s: El agente " "actual no forma parte del grupo y no puede coger tareas" #, python-format msgid "Invalid type %s specified" msgstr "Se ha especificado un tipo no válido: %s" #, python-format msgid "Missing field %s" msgstr "Falta el campo %s" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "" "El dicionario de recursos que se pase debe contener las claves resource_id y " "resource_url" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "El campo obligatorio %(field)s s deberĂ­a ser un %(type)s" #, python-format msgid "Required field %s not specified" msgstr "No se ha especificado el campo obligatorio %s" #, python-format msgid "Required fields %s not specified" msgstr "No se han especificado los campos obligatorios %s" #, python-format msgid "Skip invalid resource %s" msgstr "Omitir el recurso no válido %s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "" "CĂłdigo de estado: %{code}s. No se ha podido asignar el suceso: %{event}s" #, python-format msgid "Unrecognized type value %s" msgstr "Valor de tipo no reconocido %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "Error en la llamada al inspector del host %(ident)s %(host)s: %(err)s" ceilometer-6.0.0/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001050312701406223026765 0ustar jenkinsjenkins00000000000000# Eugènia Torrella , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev50\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-19 00:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 02:45+0000\n" "Last-Translator: Eugènia Torrella \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%d events are removed from database" msgstr "Se han eliminado %d sucesos de la base de datos" #, python-format msgid "%d samples removed from database" msgstr "Se han eliminado %d ejemplos de la base de datos" msgid "Configuration:" msgstr "ConfiguraciĂłn:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Se está estableciendo conexiĂłn con %(db)s en %(nodelist)s" msgid "Coordination backend started successfully." msgstr "El servidor coordinador se ha iniciado satisfactoriamente." #, python-format msgid "Definitions: %s" msgstr "Definiciones: %s" msgid "Detected change in pipeline configuration." msgstr "Se ha detectado un cambio en la configuraciĂłn de la interconexiĂłn." 
#, python-format msgid "Dropping event data with TTL %d" msgstr "Descartando datos de sucesos con TTL %d" #, python-format msgid "Dropping metering data with TTL %d" msgstr "Descartando datos de calibraciĂłn con TTL %d" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "Se ha detectado un suceso duplicado, se omitirá: %s" msgid "Expired residual resource and meter definition data" msgstr "El recurso residual y los datos de definiciĂłn del medidor han caducado" #, python-format msgid "Index %s will be recreate." msgstr "Se volverá a crear el Ă­ndice %s." #, python-format msgid "Joined partitioning group %s" msgstr "Se ha unido al grupo de particiĂłn %s" #, python-format msgid "Left partitioning group %s" msgstr "Ha dejado el grupo de particiĂłn %s" #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "" "No se ha proporcionado ningĂşn valor lĂ­mite, el conjunto de resultados estará " "limitado a %(limit)d." msgid "Nothing to clean, database event time to live is disabled" msgstr "" "No hay nada que limpiar, el tiempo de vida de sucesos de base de datos está " "inhabilitado" msgid "Nothing to clean, database metering time to live is disabled" msgstr "" "No hay nada que limpiar, el tiempo de vida de mediciĂłn de base de datos está " "inhabilitado" #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "InterconexiĂłn %(pipeline)s: Configure la instancia de transformador %(name)s " "con el parámetro %(param)s" #, python-format msgid "Pipeline config: %s" msgstr "ConfiguraciĂłn de interconexiĂłn: %s" msgid "Pipeline configuration file has been updated." msgstr "Se ha actualizado el archivo de configuraciĂłn de la interconexiĂłn." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "Sondeando pollster %(poll)s en el contexto de %(src)s" #, python-format msgid "Publishing policy set to %s" msgstr "PolĂ­tica de publicaciĂłn establecida en %s" msgid "Reconfiguring polling tasks." msgstr "Reconfigurando las tareas de sondeo." msgid "Reloading notification agent and listeners." msgstr "Recargando la notificaciĂłn, el agente y los escuchas." 
#, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "" "Omitir pollster %(name)s, ningĂşn recurso de %(p_context)s ha encontrado " "este ciclo" #, python-format msgid "Starting server in PID %s" msgstr "Iniciando servidor en PID %s" msgid "detected decoupled pipeline config format" msgstr "" "se ha detectado un formato de configuraciĂłn de interconexiĂłn desacoplado" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "" "datos de mediciĂłn %(counter_name)s para %(resource_id)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "sirviendo en 0.0.0.0:%(sport)s, vista en http://127.0.0.1:%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "sirviendo en http://%(host)s:%(port)s" ceilometer-6.0.0/ceilometer/locale/ja/0000775000567000056710000000000012701406364020772 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ja/LC_MESSAGES/0000775000567000056710000000000012701406364022557 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po0000664000567000056710000004401512701406224025246 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Tomoyuki KATO , 2013 # Akihiro Motoki , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Tsutomu Kimura , 2016. #zanata # 笹原 ćŚçľŽ , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev9\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-28 22:52+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-28 11:41+0000\n" "Last-Translator: 笹原 ćŚçľŽ \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s ăŚč¦‹ă¤ă‹ă‚Šăľă›ă‚“" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "演算変換ă—ă­ă‚°ă©ă ăŻă€ĺĽŹ '%s' ă§ĺ°‘ăŞăŹă¨ă‚‚ 1 ă¤ă®ăˇăĽă‚żăĽă‚’使用ă™ă‚‹ĺż…č¦ăŚă‚り" "ăľă™" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "ă†ăĽă–ă« %(table_name)s ăŻć—˘ă«ĺ­ĺś¨ă™ă‚‹ăźă‚ă€ä˝śćă§ăŤăľă›ă‚“。エă©ăĽă‚’無視ă—ăľ" "ă™" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "%(name)s ă‹ă‚‰ă®ă‚¨ă©ăĽă®ĺľŚă§ç¶ščˇŚă—ăľă™: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "スă¬ăĽă–ă›ă‚ąăă«ćŽĄç¶šă§ăŤăľă›ă‚“ă§ă—ăź: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI ă«ćŽĄç¶šă§ăŤăľă›ă‚“ă§ă—ăź: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "%(id)s ă® CPU 使用率を取得ă§ăŤăľă›ă‚“ă§ă—ăź: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "%(id)s ă®ăˇă˘ăŞăĽä˝żç”¨é‡Źă‚’取得ă§ăŤăľă›ă‚“ă§ă—ăź: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "VM %s ă®CPU 使用率を取得ă§ăŤăľă›ă‚“ă§ă—ăź" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "イăłă‚ąă‚żăłă‚ą %s ă® IP アă‰ă¬ă‚ąă‚’取得ă§ăŤăľă›ă‚“ă§ă—ăź" msgid "" "Dispatcher target was not set, no meter will be posted. 
Set the target in " "the ceilometer.conf file" msgstr "" "ă‡ă‚Łă‚ąă‘ăăăŁăĽă‚żăĽă‚˛ăăăŚč¨­ĺ®šă•れă¦ăŠă‚‰ăšă€ăˇăĽă‚żăĽăŻé€šçźĄă•れăľă›ă‚“。" "ceilometer.conf ă•ァイă«ă§ă‚żăĽă‚˛ăăを設定ă—ă¦ăŹă ă•ă„。" #, fuzzy, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "通知 %(type)s を除去ă—ă¦ă„ăľă™ (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "イăłă‚ąă‚żăłă‚ą ă®ć¤śç´˘ä¸­ă« libvirt ă§ă‚¨ă©ăĽăŚç™şç”źă—ăľ" "ă—ăź: [エă©ăĽă‚łăĽă‰ %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "HTTP 応答を解ćžă—ă¦ă„ă‚‹éš›ă«ă‚¨ă©ăĽăŚç™şç”źă—ăľă—ăź: %s" msgid "Error stopping pollster." msgstr "pollster ĺść­˘ă‚¨ă©ăĽă€‚" msgid "Event" msgstr "イă™ăłă" msgid "Expression evaluated to a NaN value!" msgstr "式㌠NaN 値ă«č©•価ă•れăľă—ăźă€‚" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "%(name)s ă®ć‹ˇĺĽµć©źč˝ă®ă‚¤ăłăťăĽăă«ĺ¤±ć•—ă—ăľă—ăź: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "イăłă‚ąă‚żăłă‚ą ă®ă‡ăĽă‚żă‚’検査ă§ăŤăľă›ă‚“ă§ă—ăźă€‚ă‰ăˇ" "イăłçŠ¶ć…‹ăŻ SHUTOFF ă§ă™ă€‚" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "%(instance_uuid)s ă®ăˇă˘ăŞăĽä˝żç”¨çжćłă‚’検査ă§ăŤăľă›ă‚“ă§ă—ăźă€‚libvirt ă‹ă‚‰ć…ĺ ±" "を取得ă§ăŤăľă›ă‚“: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "イăłă‚ąă‚żăłă‚ą ă®ăˇă˘ăŞăĽä˝żç”¨çжćłă‚’検査ă§ăŤăľă›ă‚“ă§" "ă—ăźă€‚libvirt ă‹ă‚‰ć…報を取得ă§ăŤăľă›ă‚“。" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "%s ă®é€šçźĄăŹăłă‰ă©ăĽă‚’ă­ăĽă‰ă§ăŤăľă›ă‚“ă§ă—ăź" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "タイă ă‚ąă‚żăłă—値 %s ă‚’č§Łćžă§ăŤăľă›ă‚“ă§ă—ăź" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d ă‡ăĽă‚żăťă‚¤ăłăă®ĺ…¬é–‹ă«ĺ¤±ć•—ă—ăľă—ăźă€‚ă“れらăŻĺ»ćŁ„ă•れăľă™" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d ă‡ăĽă‚żăťă‚¤ăłăă®ĺ…¬é–‹ă«ĺ¤±ć•—ă—ăľă—ăźă€‚ă“れらをキăĄăĽă«ĺ…Ąă‚Śă¦ăŹă ă•ă„" #, python-format msgid "Failed to record metering data: %s" msgstr "č¨ć¸¬ă‡ăĽă‚żă‚’č¨éڞă§ăŤăľă›ă‚“ă§ă—ăź: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "ă•ィă«ă‚żăĽĺĽŹăŚç„ˇĺŠąă§ă™: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "イăłă‚ąă‚żăłă‚ą %(name)s (%(instance_id)s) を無視ă—ă¦ă„ăľă™: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "イăłă‚ąă‚żăłă‚ą %(name)s を無視ă—ă¦ă„ăľă™: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "ă­ăĽă‰ăă©ăłă‚µăĽ %(loadbalancer_id)s を無視ă—ă¦ă„ăľă™" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "ă—ăĽă« %(pool_id)s を無視ă—ă¦ă„ăľă™" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
msgstr "" "%(line)s 行目㮠%(column)s ĺ—ă§ĺ®šçľ©ă•ァイ㫠%(file)s ă® YAML ć§‹ć–‡ ăŚç„ˇĺŠąă§" "ă™ă€‚" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "無効ăŞćśźé–“ %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特性 %(trait)s ă®ç‰ąć€§ă‚żă‚¤ă— '%(type)s' ăŚç„ˇĺŠąă§ă™" msgid "Limit must be positive" msgstr "上é™ăŻć­Łă®ĺ€¤ă§ăŞă‘れă°ăŞă‚Šăľă›ă‚“" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "スăă¬ăĽă‚¸ă‰ă©ă‚¤ăăĽă‹ă‚‰ id %s ă®ă‚¤ă™ăłăăŚč¤‡ć•°čż”ă•れăľă—ăź" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "複数㮠VM %s ㌠XenServer ă«č¦‹ă¤ă‹ă‚Šăľă—ăź" msgid "Must specify connection_url, and connection_password to use" msgstr "" "connection_url ă¨ă€ä˝żç”¨ă™ă‚‹ connection_password を指定ă™ă‚‹ĺż…č¦ăŚă‚りăľă™" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)s ă«ä˝żç”¨ă§ăŤă‚‹ %(plugin)s ă¨ă„ă†ĺŤĺ‰Ťă®ă—ă©ă‚°ă‚¤ăłăŚă‚りăľă›ă‚“" msgid "Node Manager init failed" msgstr "ăŽăĽă‰ăžăŤăĽă‚¸ăŁăĽă®ĺťćśźĺŚ–ă«ĺ¤±ć•—ă—ăľă—ăź" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)s ă«ă‚˘ă‚Żă‚»ă‚ąă™ă‚‹ć¨©é™ăŚă‚りăľă›ă‚“" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylitght API ă‹ă‚‰ %(status)s %(reason)s ăŚčż”ă•れăľă—ăź" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API ă‹ă‚‰ %(status)s %(reason)s ăŚčż”ă•れăľă—ăź" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "ćĽ”ç®—ĺ­ %(operator)s ăŻă‚µăťăĽăă•れă¦ă„ăľă›ă‚“。ă•ィăĽă«ă‰ %(field)s ă§ä˝żç”¨ă§ăŤ" "ă‚‹ă®ăŻç­‰äľˇćĽ”ç®—ĺ­ă®ăżă§ă™ă€‚" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "ćĽ”ç®—ĺ­ %(operator)s ăŻă‚µăťăĽăă•れă¦ă„ăľă›ă‚“。サăťăĽăă•れă¦ă„る演算ĺ­ăŻ " "%(supported)s ă§ă™ă€‚" #, python-format msgid "Order-by expression not valid: %s" msgstr "order-by 式ăŚç„ˇĺŠąă§ă™: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "%(name)s ă«é–˘ă™ă‚‹ JSONPath ă®ćŚ‡ĺ®š '%(jsonpath)s' ă®ă‚¨ă©ăĽă‚’č§Łćžă—ăľă™: " "%(err)s" msgid "Period must be positive." msgstr "ćśźé–“ăŻć­Łă®ć•°ă§ăŞă‘れă°ăŞă‚Šăľă›ă‚“。" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "" "ă‘イă—ă©ă‚¤ăł %(pipeline)s: ă‘ă–ăŞăă‚·ăŁăĽ %(pub)s ă‹ă‚‰ă®ă‚¨ă©ăĽă®ç™şç”źĺľŚă® " "%(status)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "" "ă‘イă—ă©ă‚¤ăł %(pipeline)s: ă‘ă–ăŞăă‚·ăŁăĽ %(pub)s ă‹ă‚‰ă®ă‚¨ă©ăĽă®ĺľŚă§ç¶ščˇŚă—ăľ" "ă™" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "ă‘イă—ă©ă‚¤ăł %(pipeline)s: 変換ă—ă­ă‚°ă©ă  %(trans)s ă‚’ă•ă©ăă‚·ăĄă™ă‚‹ă¨ăŤă«ă‚¨" "ă©ăĽăŚç™şç”źă—ăľă—ăź" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "ă‘イă—ă©ă‚¤ăł %(pipeline)s: %(smp)s ă«ă¤ă„ă¦ĺ¤‰ćŹ›ă—ă­ă‚°ă©ă  %(trans)s ă‹ă‚‰ă‚¨" "ă©ăĽăŚç™şç”źă—ăźĺľŚă«çµ‚了ă—ăľă™" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "ă—ă©ă‚°ă‚¤ăłăŚćŚ‡ĺ®šă•れă¦ă„ăľă™ăŚă€%s ă«ă—ă©ă‚°ă‚¤ăłĺŤăŚćŹäľ›ă•れă¦ă„ăľă›ă‚“" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "ă‚»ăłă‚µăĽ %(mtr)s ă®ăťăĽăŞăłă‚°ăŚ %(cnt)s 回失敗ă—ăľă—ăź" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" 
msgstr "ăťăĽăŞăłă‚° %(name)s ㌠%(cnt)s 回失敗ă—ăľă—ăź" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s ă® pollster ăŚç„ˇĺŠąă«ăŞăŁă¦ă„ăľă™" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "pollster %(name)s ăŚă“れ以上ソăĽă‚ą %(source)s ă‚’ăťăĽăŞăłă‚°ă—ăŞă„ă‚ă†ă«ă—ă¦ăŹ" "ă ă•ă„" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "ă‘ă–ăŞăă‚·ăŁăĽ local_queue 最大長を超ăăľă—ăźă€‚古ă„ć–ąă‹ă‚‰ %d 個ă®ă‚µăłă—ă«ă‚’除" "去ă—ăľă™" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "ă‘ă–ăŞăă‚·ăĄăťăŞă‚·ăĽăŚä¸ŤćŽă§ă™ (%s)。強ĺ¶çš„ă«ă‡ă•ă‚©ă«ăă«č¨­ĺ®šă•れăľă™" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API ă‹ă‚‰ %(status)s %(reason)s ăŚčż”ă•れăľă—ăź" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "NorthBound REST API を使用ă—ăź OpenDaylight ă¸ă®ćŽĄç¶šč¦ć±‚ăŚĺ¤±ć•—ă—ăľă—ăź" #, python-format msgid "Required field %s not specified" msgstr "ĺż…é ă•ィăĽă«ă‰ %s ăŚćŚ‡ĺ®šă•れă¦ă„ăľă›ă‚“" msgid "Resource" msgstr "ăŞă‚˝ăĽă‚ą" msgid "Sample" msgstr "サăłă—ă«" msgid "Samples should be included in request body" msgstr "サăłă—ă«ăŻč¦ć±‚本文ă«ĺ«ăľă‚Śă‚‹ĺż…č¦ăŚă‚りăľă™" #, python-format msgid "Skip loading extension for %s" msgstr "%s ă®ć‹ˇĺĽµć©źč˝ă®ă­ăĽă‰ă‚’スキăă—ă—ăľă™" #, python-format msgid "String %s is not a valid isotime" msgstr "ć–‡ĺ­—ĺ— %s ăŻç„ˇĺŠąăŞ isotime ă§ă™" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "サăłă—ă«ă¨ gnocchi ă®ăŞă‚˝ăĽă‚ą/ăˇăăŞă‚Żă‚ąé–“ă®ăžăă”ăłă‚°ă‚’定義ă™ă‚‹ Yaml ă•ァイ" "ă«" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "ă‡ăĽă‚żĺž‹ %(type)s ăŻă‚µăťăĽăă•れă¦ă„ăľă›ă‚“。サăťăĽăă•れă¦ă„ă‚‹ă‡ăĽă‚żĺž‹ă®ăŞă‚ą" "ă: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s ă«ăŻă•ィăĽă«ă‰ 'fields' ăŚĺż…č¦ă§ă™" msgid "The path for the file publisher is required" msgstr "ă•ァイă«ă‘ă–ăŞăă‚·ăŁăĽă®ă‘スăŚĺż…č¦ă§ă™" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: %s ă‹ă‚‰é€äżˇă•れăźă‡ăĽă‚żă‚’ă‡ă‚łăĽă‰ă§ăŤăľă›ă‚“" msgid "UDP: Unable to store meter" msgstr "UDP: ăˇăĽă‚żăĽă‚’äżťĺ­ă§ăŤăľă›ă‚“" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "ă‡ăĽă‚żă™ăĽă‚ąă‚µăĽăăĽă«ćŽĄç¶šă§ăŤăľă›ă‚“: %(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "値 %(value)s ă‚’ă€ćłĺ®šă•れるă‡ăĽă‚żĺž‹ %(type)s ă«ĺ¤‰ćŹ›ă§ăŤăľă›ă‚“。" #, python-format msgid "Unable to discover resources: %s" msgstr "ăŞă‚˝ăĽă‚ąă‚’検出ă§ăŤăľă›ă‚“: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "式 %(expr)s を評価ă§ăŤăľă›ă‚“: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "ă‘ă–ăŞăă‚·ăŁăĽ %s ă‚’ă­ăĽă‰ă§ăŤăľă›ă‚“" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "ăŹă‚¤ă‘ăĽăイザăĽă‚¤ăłă‚ąăšă‚Żă‚żăĽă‚’ă­ăĽă‰ă§ăŤăľă›ă‚“: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "%(retries)d 回ă®ĺ†Ťč©¦čˇŚĺľŚă€1 次 mongodb ă«ĺ†ŤćŽĄç¶šă§ăŤăľă›ă‚“。中止ă—ăľă™ă€‚" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." 
msgstr "" "ă—ă©ă‚¤ăžăŞăĽ mongodb ă«ĺ†ŤćŽĄç¶šă§ăŤăľă›ă‚“: %(errmsg)s。%(retry_interval)d 秒以" "内ă«ĺ†Ťč©¦čˇŚă—ăľă™ă€‚" msgid "Unable to send sample over UDP" msgstr "UDP 経由ă§ă‚µăłă—ă«ă‚’é€äżˇă§ăŤăľă›ă‚“" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)s ă‚’ćłĺ®šă•れるă‡ăĽă‚żĺž‹ %(type)s ă«ĺ¤‰ćŹ›ă™ă‚‹éš›ă«ă€ćłĺ®šă—ăŞă„例外ăŚç™şç”ź" "ă—ăľă—ăźă€‚" #, python-format msgid "Unknown discovery extension: %s" msgstr "不ćŽăŞă‡ă‚Łă‚ąă‚«ăăŞăĽă‚¨ă‚Żă‚ąă†ăłă‚·ă§ăł: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "不ćŽăŞăˇă‚żă‡ăĽă‚żç¨®ĺĄă§ă™ă€‚キ㼠(%s) ăŻç…§äĽšä¸ŤĺŹŻă«ăŞă‚Šăľă™ă€‚" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "ă­ăĽă‰ăă©ăłă‚µăĽ %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’ス" "ă‚­ăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "ă•ァイアウォăĽă« %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’ス" "ă‚­ăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "ăŞă‚ąăŠăĽ %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľ" "ă™" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "ăˇăłă㼠%(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľ" "ă™" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "ă—ăĽă« %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "ä»®ćł IP %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "vpn %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VMware vSphere ă§ VM %s ăŚč¦‹ă¤ă‹ă‚Šăľă›ă‚“" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s ㌠XenServer ă«č¦‹ă¤ă‹ă‚Šăľă›ă‚“" msgid "Wrong sensor type" msgstr "ă‚»ăłă‚µăĽç¨®ĺĄăŚć­Łă—ăŹă‚りăľă›ă‚“" msgid "XenAPI not installed" msgstr "XenAPI ăŚă‚¤ăłă‚ąăăĽă«ă•れă¦ă„ăľă›ă‚“" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "定義ă•ァイ㫠%(file)s ă§ă®čŞ­ăżĺŹ–ă‚Šă® YAML エă©ăĽ" msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." msgstr "" "Aodh ăŚç„ˇĺŠąĺŚ–ă•れるă‹ä˝żç”¨ä¸ŤĺŹŻă®ĺ ´ĺă€URL ăŚä˝żç”¨ă§ăŤăŞă„ă“ă¨ă‚’警告ă—ăľă™ă€‚" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "%(id)s ă® CPU 時間を取得ă§ăŤăľă›ă‚“ă§ă—ăź: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "Gnocchi を有効化ă—ăźĺ ´ĺăŻă€direct オă—ă‚·ă§ăłă‚’ True ă«č¨­ĺ®šă™ă‚‹ă“ă¨ăŻă§ăŤăľă›" "ん。" #, python-format msgid "dropping out of time order sample: %s" msgstr "ćśźé™ĺ‡ă‚Śă®ă‚ŞăĽă€ăĽă‚µăłă—ă«ă‚’ĺ»ćŁ„ă—ă¦ă„ăľă™: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "サăłă—ă« (ĺ…行ăŞă—) ă‚’ĺ»ćŁ„ă—ă¦ă„ăľă™: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool 出力ă®é•·ă•ăŚä¸€č‡´ă—ăľă›ă‚“" msgid "max_bytes and backup_count should be numbers." 
msgstr "max_bytes 㨠backup_count ăŻć•°ĺ€¤ă§ăŞă‘れă°ăŞă‚Šăľă›ă‚“。" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "ăˇăă‚»ăĽă‚¸ă‚·ă‚°ă‹ăăŁăĽăŚç„ˇĺŠąă§ă™ă€‚ăˇăă‚»ăĽă‚¸ă‚’破棄ă—ăľă™: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI ă‚»ăłă‚µăĽă‡ăĽă‚żă®č§Łćžă«ĺ¤±ć•—ă—ăľă—ăźă€‚指定ă•れăźĺ…ĄĺŠ›ă‹ă‚‰ă‡ăĽă‚żăŚĺŹ–ĺľ—ă•れăľ" "ă›ă‚“ă§ă—ăź" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI ă‚»ăłă‚µăĽă‡ăĽă‚żă®č§Łćžă«ĺ¤±ć•—ă—ăľă—ăźă€‚不ćŽăŞă‚»ăłă‚µăĽç¨®ĺĄă§ă™ă€‚" msgid "running ipmitool failure" msgstr "ipmitool ă®ĺ®źčˇŚă«ĺ¤±ć•—ă—ăľă—ăź" ceilometer-6.0.0/ceilometer/locale/ceilometer-log-error.pot0000664000567000056710000001115412701406223025156 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-07 06:08+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: ceilometer/collector.py:176 #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "" #: ceilometer/coordination.py:50 msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent " "is not part of group and cannot take tasks" msgstr "" #: ceilometer/coordination.py:88 ceilometer/coordination.py:100 msgid "Error connecting to coordination backend." msgstr "" #: ceilometer/coordination.py:115 msgid "Error sending a heartbeat to coordination backend." msgstr "" #: ceilometer/coordination.py:146 #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "" #: ceilometer/coordination.py:198 msgid "Error getting group membership info from coordination backend." msgstr "" #: ceilometer/service_base.py:118 #, python-format msgid "Unable to load changed pipeline: %s" msgstr "" #: ceilometer/service_base.py:140 #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "" #: ceilometer/agent/manager.py:467 #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "" #: ceilometer/agent/plugin_base.py:109 msgid "Fail to process notification" msgstr "" #: ceilometer/api/hooks.py:65 #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "" #: ceilometer/compute/pollsters/memory.py:108 #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "" #: ceilometer/dispatcher/database.py:54 #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "" #: ceilometer/dispatcher/database.py:97 #, python-format msgid "Failed to record metering data: %s" msgstr "" #: ceilometer/dispatcher/database.py:128 #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "" #: ceilometer/dispatcher/gnocchi.py:102 #, python-format msgid "Required field %s not specified" msgstr "" #: ceilometer/dispatcher/gnocchi.py:105 #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "" #: ceilometer/dispatcher/gnocchi.py:234 msgid "Failed to connect to Gnocchi." 
msgstr "" #: ceilometer/dispatcher/gnocchi.py:250 #, python-format msgid "Failed to load resource due to error %s" msgstr "" #: ceilometer/dispatcher/http.py:126 msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "" #: ceilometer/event/endpoint.py:69 msgid "Fail to process a notification" msgstr "" #: ceilometer/event/storage/impl_elasticsearch.py:110 #: ceilometer/event/storage/impl_hbase.py:121 #: ceilometer/event/storage/impl_sqlalchemy.py:203 #: ceilometer/event/storage/impl_sqlalchemy.py:205 #: ceilometer/event/storage/pymongo_base.py:69 #, python-format msgid "Failed to record event: %s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:69 #, python-format msgid "Missing field %s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:72 #, python-format msgid "Unrecognized type value %s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:107 msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "" #: ceilometer/hardware/pollsters/generic.py:126 #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:149 #, python-format msgid "Skip invalid resource %s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:177 #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:227 #: ceilometer/meter/notifications.py:197 #, python-format msgid "Error loading meter definition : %(err)s" msgstr "" #: ceilometer/meter/notifications.py:65 #, python-format msgid "Required fields %s not specified" msgstr "" #: ceilometer/meter/notifications.py:74 #, python-format msgid "Invalid type %s specified" msgstr "" #: ceilometer/publisher/kafka_broker.py:83 #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "" #: ceilometer/publisher/messaging.py:168 msgid "Failed to retry to send sample data with max_retry times" msgstr "" ceilometer-6.0.0/ceilometer/locale/pt_BR/0000775000567000056710000000000012701406364021406 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000012701406364023173 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po0000664000567000056710000003767212701406224025675 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Andreas Jaeger , 2015. #zanata # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Carlos Marques , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 18:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 11:05+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s NĂŁo Encontrada" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "O transformador aritmĂ©tico deve usar pelo menos um medidor na expressĂŁo '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "NĂŁo Ă© possĂ­vel criar a tabela %(table_name)s; ela já existe. Ignorando erro" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continuar apĂłs erro de %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "NĂŁo foi possĂ­vel conectar-se ao host escravo: %s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "NĂŁo foi possĂ­vel conectar-se ao XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "NĂŁo foi possĂ­vel obter Uso de CPU para %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "NĂŁo foi possĂ­vel obter de Uso de MemĂłria para %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "NĂŁo foi possĂ­vel obter a utilização de CPU da máquina virtual %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "NĂŁo foi possĂ­vel obter o endereço IP da instância %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "O destino do despachante nĂŁo foi configurado, nenhum medidor será postado. " "Defina o destino no arquivo ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao consultar instância : [CĂłdigo " "de Erro %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Erro ao analisar a resposta de HTTP: %s" msgid "Error stopping pollster." msgstr "Erro ao parar pesquisador. " msgid "Event" msgstr "Evento" msgid "Expression evaluated to a NaN value!" msgstr "ExpressĂŁo avaliada para um valor NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Falha ao importar extensĂŁo para %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Falha ao inspecionar os dados da instância , " "estado do domĂ­nio Ă© SHUTOFF." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Falha ao inspecionar o uso da memĂłria de %(instance_uuid)s, nĂŁo Ă© possĂ­vel " "obter informações a partir de libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "Falha ao inspecionar o uso da memĂłria da instância , nĂŁo Ă© possĂ­vel obter informações a partir de libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Falha ao carregar qualquer manipulador de notificações para %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Falha ao analisar o valor do registro de data e hora %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Falha ao publicar %d pontos de dados, descartando-os" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Falha ao publicar %d pontos de dados, enfileire-os" #, python-format msgid "Failed to record metering data: %s" msgstr "Falha ao gravar dados de medição: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "ExpressĂŁo de filtro inválida: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Ignorando a instância %(name)s (%(instance_id)s): %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Ignorando a instância %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Ignorando loadbalancer %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Ignorando conjunto%(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxe YAML inválida no arquivo de definições %(file)s na linha: %(line)s, " "coluna: %(column)s." #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "PerĂ­odo inválido %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" msgid "Limit must be positive" msgstr "Limite deve ser positivo" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "" "Mais de um evento com o ID %s retornado a partir do driver de armazenamento" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Várias máquinas virtuais %s localizadas no XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "connection_url e connection_password devem ser especificados para uso" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nenhum plug-in nomeado %(plugin)s disponĂ­vel para %(name)s" msgid "Node Manager init failed" msgstr "Inicialização do gerenciador de nĂł com falha" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "NĂŁo Autorizado a acessar %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "API OpenDaylitght retornou %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "A API Opencontrail retornou%(status)s%(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "O operador %(operator)s nĂŁo Ă© suportado. Somente operador de igualdade está " "disponĂ­vel para o campo %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "O operador %(operator)s nĂŁo Ă© suportado. 
Os operadores suportados são: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Expressão solicitada inválida: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " "%(err)s" msgid "Period must be positive." msgstr "Período deve ser positivo." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s após erro do publicador %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Continue após erro do publicador %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "Pipeline %(pipeline)s: Erro ao limpar transformador %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Saia após erro do transformador %(trans)s para %(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "O sensor de pesquisa %(mtr)s falhou para %(cnt)s vezes!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" #, python-format msgid "Pollster for %s is disabled!" msgstr "O pesquisador para %s está desativado!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Evite o pesquisador %(name)s para a origem de pesquisa %(source)s atualmente!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Comprimento máximo de local_queue do publicador foi excedido, descartando %d " "amostras antigas" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Publicando política desconhecida (%s) força para o padrão" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "O pedido falhou ao conectar-se ao OpenDaylight com API REST NorthBound" #, python-format msgid "Required field %s not specified" msgstr "Campo obrigatório %s não especificado" msgid "Resource" msgstr "Recurso" msgid "Sample" msgstr "Amostra" msgid "Samples should be included in request body" msgstr "As amostras devem ser incluídas no corpo da solicitação" #, python-format msgid "Skip loading extension for %s" msgstr "Ignorar a extensão de carregamento para %s" #, python-format msgid "String %s is not a valid isotime" msgstr "Sequência %s não é um isotime válido" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "O arquivo Yaml que define o mapeamento entre amostras e recursos gnocchi/" "métrica" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "O tipo de dados %(type)s não é suportado.
A lista de tipos de dados " "suportados é: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "O campo 'fields' é necessário para %s" msgid "The path for the file publisher is required" msgstr "O caminho para o publicador do arquivo é necessário" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: Não pode decodificar dados enviados por %s" msgid "UDP: Unable to store meter" msgstr "UDP: Não é possível armazenar medida" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Não é possível conectar-se ao servidor de banco de dados: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Não é possível converter o valor %(value)s para o tipo de dados esperado " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "Não é possível descobrir recursos: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Não é possível avaliar expressão %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Impossível carregar publicador %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Impossível carregar o inspetor do hypervisor: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Não é possível se reconectar ao mongodb primário após %(retries)d novas " "tentativas. Desistindo." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Não é possível se reconectar ao mongodb primário: %(errmsg)s. Tentando " "novamente em %(retry_interval)d segundos." msgid "Unable to send sample over UDP" msgstr "Não é possível enviar amostra sobre UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exceção inesperada convertendo %(value)s para o tipo de dado esperado " "%(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Extensão de descoberta desconhecida: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Tipo de metadados desconhecido. Chave (%s) não será consultável."
#, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no Balanceador de Carga %(id)s, " "ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no fw %(id)s, ignorando " "a amostra" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no listener %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no membro %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no conjunto %(id)s, ignorando amostras" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido em vip %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no vpn %(id)s, ignorando a " "amostra" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s não localizada no VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Máquina virtual %s não localizada no XenServer" msgid "Wrong sensor type" msgstr "Tipo de sensor errado" msgid "XenAPI not installed" msgstr "XenAPI não instalado" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erro YAML ao ler o arquivo de definições %(file)s" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "não pôde obter o tempo de CPU para %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "A opção direta não pode ser true quando o Gnocchi está ativado. " #, python-format msgid "dropping out of time order sample: %s" msgstr "eliminando amostra fora de ordem de tempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "descartando amostra sem predecessor: %s" msgid "ipmitool output length mismatch" msgstr "incompatibilidade no comprimento da saída de ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes e backup_count devem ser números." #, python-format msgid "message signature invalid, discarding message: %r" msgstr "assinatura de mensagem inválida, descartando mensagem: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "análise dos dados do sensor IPMI com falha, nenhum dado recuperado da " "entrada fornecida" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "análise dos dados do sensor IPMI com falha,tipo de sensor desconhecido" msgid "running ipmitool failure" msgstr "executando falha de ipmitool" ceilometer-6.0.0/ceilometer/locale/it/0000775000567000056710000000000012701406364021014 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/it/LC_MESSAGES/0000775000567000056710000000000012701406364022601 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/it/LC_MESSAGES/ceilometer.po0000664000567000056710000003775412701406224025304 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project.
# # Translators: # Stefano Maffulli , 2013 # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-20 20:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-19 05:24+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s non trovato" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Il trasformatore aritmetico deve utilizzare almeno un contatore " "nell'espressione '%s'" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Impossibile creare la tabella %(table_name)s la tabella già esiste. " "Ignorare l'errore" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Continua dopo errore da %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "Impossibile connettersi all'host slave: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Impossibile connettersi a XenAPI: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Impossibile ricevere CPU Util per %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Impossibile ricevere l'Uso della Memoria per %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "Impossibile conoscere l'utilizzo CPU della VM %s" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "Impossibile ottenere l'indirizzo IP dell'istanza %s" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "La destinazione del dispatcher non è stata impostata, nessun contatore verrà " "inviato. Impostare la destinazione nel file ceilometer.conf" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Errore durante l'analisi della risposta HTTP: %s" msgid "Error stopping pollster." msgstr "Errore durante l'arresto del sondaggio. " msgid "Event" msgstr "Evento" msgid "Expression evaluated to a NaN value!" msgstr "Espressione valutata a un valore NaN!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Impossibile importare l'estensione per %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Impossibile ispezionare i dati dell'istanza = , " "stato dominio SHUTOFF."
#, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Impossibile ispezionare l'utilizzo della memoria da parte di " "%(instance_uuid)s, impossibile ottenere informazioni da libvirt: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "Impossibile ispezionare l'utilizzo della memoria da parte dell'istanza = " ", impossibile ottenere informazioni da libvirt." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Impossibile caricare eventuali gestori di notifica per %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Impossibile analizzare il valore data/ora %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Impossibile pubblicare %d datapoint, eliminati" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" #, python-format msgid "Failed to record metering data: %s" msgstr "Impossibile registrare i dati di misurazione: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Espressione del filtro non valida: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "L'istanza %(name)s (%(instance_id)s) viene ignorata: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Si sta ignorando l'istanza %(name)s: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Ignora loadbalancer %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Ignora pool %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " "%(line)s, colonna: %(column)s." #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "Periodo non valido %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "" "Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" msgid "Limit must be positive" msgstr "Il limite deve essere positivo" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Più di un evento con id %s restituito dal driver di archiviazione" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Più VM %s trovate in XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "È necessario specificare connection_url e connection_password da utilizzare" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" msgid "Node Manager init failed" msgstr "Inizializzazione gestore nodi non riuscita" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "L'API OpenDaylitght ha restituito %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "L'API Opencontrail ha restituito %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Operatore %(operator)s non è supportato.
Solo gli operatori di uguaglianza " "sono disponibili per il campo %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Operatore %(operator)s non è supportato. Gli operatori supportati sono: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "L'espressione ordina per non è valida: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " "%(err)s" msgid "Period must be positive." msgstr "Il periodo deve essere positivo." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s dopo errore da publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Continuare dopo errore da publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "Pipeline %(pipeline)s: errore durante lo scaricamento del trasformatore " "%(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Uscita dopo errore del trasformatore %(trans)s per " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster per %s disabilitato!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Impedire al pollster %(name)s di eseguire il polling dell'origine %(source)s."
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La lunghezza local_queue massima del publisher è stata superata, " "eliminazione di esempi %d meno recenti" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "La politica di pubblicazione è sconosciuta (%s), applicazione del valore " "predefinito" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "Richiesta di collegamento a OpenDaylight con API NorthBound REST non riuscita" #, python-format msgid "Required field %s not specified" msgstr "Campo richiesto %s non specificato" msgid "Resource" msgstr "Risorsa" msgid "Sample" msgstr "Esempio" msgid "Samples should be included in request body" msgstr "I campioni devono essere inclusi nel corpo della richiesta " #, python-format msgid "Skip loading extension for %s" msgstr "Ignora caricamento dell'estensione per %s" #, python-format msgid "String %s is not a valid isotime" msgstr "La stringa %s non è un orario standard (isotime) valido" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Il file Yaml che definisce l'associazione tra i campioni e le risorse " "gnocchi/metriche" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " "supportati è: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Il campo 'fields' è obbligatorio per %s" msgid "The path for the file publisher is required" msgstr "Il percorso per il publisher di file è obbligatorio" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: impossibile decodificare i dati inviati da %s" msgid "UDP: Unable to store meter" msgstr "UDP: impossibile memorizzare il contatore" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Impossibile connettersi al server di database: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Impossibile convertire il valore %(value)s nel tipo di dati previsto " "%(type)s." #, python-format msgid "Unable to discover resources: %s" msgstr "Impossibile rilevare le risorse: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Impossibile valutare l'espressione %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Impossibile caricare il publisher %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Impossibile caricare il programma di controllo hypervisor: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Impossibile riconnettersi al mongodb primario dopo %(retries)d tentativi. " "L'operazione viene interrotta." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Impossibile connettersi al mongodb primario: %(errmsg)s. Prossimo tentativo " "tra %(retry_interval)d secondi." 
msgid "Unable to send sample over UDP" msgstr "Impossibile inviare l'esempio su UDP" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Eccezione non prevista durante la conversione di %(value)s per il tipo di " "dati previsto %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Estensione di rilevamento sconosciuta: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "" "Tipo di metadati sconosciuto. La chiave (%s) non potrĂ  essere sottoposta a " "query." #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su bilanciatore del carico %(id)s, " "ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su listener %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su membro %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto sul pool %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vip %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s non trovata in VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s non trovata in XenServer" msgid "Wrong sensor type" msgstr "Tipo di sensore errato" msgid "XenAPI not installed" msgstr "XenAPI non installato" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Errore YAML durante la lettura del file definizioni %(file)s" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "impossibile ricevere l'ora CPU per %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "L'opzione direct non può essere true quando Gnocchi è abilitato." #, python-format msgid "dropping out of time order sample: %s" msgstr "rilascio campione ordinamento fuori tempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "eliminazione in corso dell'esempio senza predecessore: %s" msgid "ipmitool output length mismatch" msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes e backup_count devono essere numeri." 
#, python-format msgid "message signature invalid, discarding message: %r" msgstr "Firma messaggio non valida, eliminazione del messaggio: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " "dall'input fornito" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" msgid "running ipmitool failure" msgstr "errore nell'esecuzione ipmitool" ceilometer-6.0.0/ceilometer/locale/de/0000775000567000056710000000000012701406364020770 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/de/LC_MESSAGES/0000775000567000056710000000000012701406364022555 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po0000664000567000056710000001132712701406223027465 0ustar jenkinsjenkins00000000000000# Monika Wolf , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev34\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-09 20:26+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-04 10:08+0000\n" "Last-Translator: Monika Wolf \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, fuzzy msgid "Can't connect to keystone, assuming aodh is disabled and retry later." msgstr "" "Herstellen einer Verbindung zu Keystone nicht möglich. Aodh ist " "möglicherweise inaktiviert. Operation wird später wiederholt." #, fuzzy msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" msgstr "" "Herstellen einer Verbindung zu Keystone nicht möglich. Gnocchi ist " "möglicherweise inaktiviert. Operation wird später wiederholt." msgid "" "Cannot extract tasks because agent failed to join group properly. Rejoining " "group." msgstr "" "Extrahieren der Tasks nicht möglich, da der Agent nicht ordnungsgemäß in die " "Gruppe eingebunden werden konnte. Operation zum Wiedereinbinden in die " "Gruppe wird durchgeführt." #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " "%(exc)s" msgstr "" "Die %(pollster)s-Daten für %(instance_id)s können nicht untersucht werden. " "Behebbare Ursache: %(exc)s" #, python-format msgid "Dropping out of time order sample: %s" msgstr "" "Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" #, python-format msgid "Failed to load any dispatchers for %s" msgstr "Es konnten keine Dispatcher für %s geladen werden." #, python-format msgid "Ignore unrecognized field %s" msgstr "Nicht erkanntes Feld %s ignorieren" #, python-format msgid "Invalid status, skipping IP address %s" msgstr "Ungültiger Status. IP-Adresse %s wird übersprungen." msgid "Negative delta detected, dropping value" msgstr "Negatives Delta erkannt. Wert wird verworfen." #, python-format msgid "No endpoints found for service %s" msgstr "Es wurden keine Endpunkte für den Service %s gefunden." msgid "" "Non-metric meters may be collected. It is highly advisable to disable these " "meters using ceilometer.conf or the pipeline.yaml" msgstr "" "Es werden möglicherweise nicht metrische Daten erfasst.
Es wird dringend " "empfohlen, diese Zähler über die Datei ceilometer.conf oder pipeline.yaml zu " "inaktivieren." #, python-format msgid "" "Skipping %(name)s, %(service_type)s service is not registered in keystone" msgstr "" "%(name)s wird übersprungen. Der Service %(service_type)s ist nicht in " "Keystone registriert." #, python-format msgid "Skipping duplicate meter definition %s" msgstr "Doppelte Messdefinition %s wird übersprungen." msgid "" "ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " "aodh endpoint." msgstr "" "Die ceilometer-api wurde mit aktiviertem aodh gestartet. Alarm-URLs werden " "an den aodh-Endpunkt umgeleitet. " msgid "" "ceilometer-api started with gnocchi enabled. The resources/meters/samples " "URLs are disabled." msgstr "" "Die ceilometer-api wurde mit aktiviertem Gnocchi gestartet. Die URLs für " "resources/meters/samples sind inaktiviert." #, python-format msgid "event signature invalid, discarding event: %s" msgstr "Ereignissignatur ungültig. Ereignis wird verworfen: %s" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "Nachrichtensignatur ungültig, Nachricht wird verworfen: %r" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " "volume (volume: None), the sample will be dropped" msgstr "" "Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " "enthält keinen Datenträger (volume: None). Die Stichprobe wird gelöscht." #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " "volume which is not a number (volume: %(counter_volume)s), the sample will " "be dropped" msgstr "" "Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " "enthält einen Datenträger ohne Zahl (volume: %(counter_volume)s). Die " "Stichprobe wird gelöscht." msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug kann nicht aktiviert werden, wenn Worker > 1 ist. Der Wert wird " "mit False überschrieben." #, python-format msgid "unable to configure oslo_cache: %s" msgstr "Konfigurieren von oslo_cache nicht möglich: %s" ceilometer-6.0.0/ceilometer/locale/de/LC_MESSAGES/ceilometer.po0000664000567000056710000004071212701406223025243 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Carsten Duch , 2014 # Christian Berendt , 2014 # Ettore Atalan , 2014 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Monika Wolf , 2016. #zanata # Tom Cocozzello , 2016.
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev50\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-19 00:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-14 08:40+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s nicht gefunden" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' " "verwenden" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Tabelle %(table_name)s kann nicht erstellt werden, da sie bereits vorhanden " "ist. Fehler wird ignoriert" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "Fortfahren nach Fehler von %(name)s: %(error)s" #, python-format msgid "Could not connect slave host: %s " msgstr "" "Es konnte keine Verbindung zum untergeordneten Host hergestellt werden: %s " #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "Abruf von CPU-Auslastung nicht möglich für %(id)s: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "Abruf von Speicherbelegung nicht möglich für %(id)s: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "CPU-Auslastung für VM %s konnte nicht abgerufen werden" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "IP-Adresse von Instanz %s konnte nicht abgerufen werden" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "Dispatcher-Ziel nicht definiert, es werden keine Messgrößen übergeben. Das " "Ziel in der Datei 'ceilometer.conf' definieren." #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt während Suche nach Instanz : " "[Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Fehler bei Auswertung der HTTP-Antwort %s" msgid "Error stopping pollster." msgstr "Fehler beim Stoppen des Pollster." msgid "Event" msgstr "Ereignis" msgid "Expression evaluated to a NaN value!" msgstr "Ausdruck ergab einen NaN-Wert!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "Fehler beim Importieren der Erweiterung für %(name)s: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Fehler beim Überprüfen von Daten der Instanz , " "Domänenstatus ist ABGESCHALTET." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "Fehler beim Überprüfen der Speicherbelegung von %(instance_uuid)s, " "Informationen können nicht von libvirt abgerufen werden: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt."
msgstr "" "Fehler beim ĂśberprĂĽfen der Speicherbelegung von Instanz , Informationen können nicht von libvirt abgerufen werden." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "Es konnten keine Benachrichtigungshandler fĂĽr %s geladen werden" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Zeitmarkenwert %s konnte nicht analysiert werden" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange " "einreihen" #, python-format msgid "Failed to record metering data: %s" msgstr "Messdaten wurden nicht aufgezeichnet: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Filterausdruck nicht gĂĽltig: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "Instanz %(name)s (%(instance_id)s) wird ignoriert: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "Instanz %(name)s wird ignoriert: %(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "Loadbalancer %(loadbalancer_id)s wird ignoriert." #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "Pool %(pool_id)s wird ignoriert." #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "UngĂĽltige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, " "Spalte: %(column)s." #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "UngĂĽltiger Zeitraum %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "UngĂĽltiger Traittyp '%(type)s' fĂĽr Trait %(trait)s" msgid "Limit must be positive" msgstr "Grenzwert muss positiv sein" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "Mehr als ein Ereignis mit der ID %s vom Speichertreiber zurĂĽckgegeben" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Mehrere VMs %s in XenServer gefunden" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Angabe von connection_url und connection_password fĂĽr die Verwendung " "erforderlich" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Kein Plug-in mit dem Namen %(plugin)s verfĂĽgbar fĂĽr %(name)s." msgid "Node Manager init failed" msgstr "Initialisierung von Node Manager fehlgeschlagen" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Nicht berechtigt fĂĽr den Zugriff auf %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylight-API hat Folgendes zurĂĽckgegeben: %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail-API hat Folgendes zurĂĽckgegeben: %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "" "Operator %(operator)s wird nicht unterstĂĽtzt. FĂĽr das Feld %(field)s ist " "nur der Gleichheitsoperator verfĂĽgbar." #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Operator %(operator)s wird nicht unterstĂĽtzt. 
Unterstützte Operatoren: " "%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s" msgid "Period must be positive." msgstr "Zeitraum muss positiv sein." #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: %(status)s nach Fehler von Publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "Pipeline %(pipeline)s: Fortsetzen nach Fehler von Publisher %(pub)s" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "" "Pipeline %(pipeline)s: Fehler bei Flushoperation für Transformer %(trans)s" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "" "Pipeline %(pipeline)s: Beendigung nach Fehler von Transformer %(trans)s für " "%(smp)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben." #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster für %s ist inaktiviert!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "Verhindern Sie, dass Pollster %(name)s Quelle %(source)s weiterhin abfragt!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Maximale Länge von local_queue für Publisher ist überschritten, die %d " "ältesten Beispiele werden gelöscht" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung " "setzen" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "" "Anforderung konnte keine Verbindung mit OpenDaylight über NorthBound REST-" "API herstellen" #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben" msgid "Resource" msgstr "Resource" msgid "Sample" msgstr "Beispiel" msgid "Samples should be included in request body" msgstr "Beispiele sollten in Anforderungshauptteil enthalten sein" #, python-format msgid "Skip loading extension for %s" msgstr "Laden der Erweiterung für %s überspringen" #, python-format msgid "String %s is not a valid isotime" msgstr "Zeichenfolge %s ist kein gültiger Wert für 'isotime'" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "" "Die YAML-Datei mit der Definition der Zuordnung zwischen Beispielen und " "gnocchi-Ressourcen/Metriken" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Der Datentyp %(type)s wird nicht unterstützt.
Die Liste der unterstützten " "Datentypen lautet: %(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Das Feld 'fields' ist erforderlich für %s" msgid "The path for the file publisher is required" msgstr "Der Pfad für den Datei-Publisher ist erforderlich" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP: Von %s gesendete Daten konnten nicht dekodiert werden" msgid "UDP: Unable to store meter" msgstr "UDP: Messgröße kann nicht gespeichert werden" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "" "Es kann keine Verbindung zum Datenbankserver hergestellt werden: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt " "werden." #, python-format msgid "Unable to discover resources: %s" msgstr "Ressourcen können nicht gefunden werden: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Auswertung nicht möglich für Ausdruck %(expr)s: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "Publisher %s kann nicht geladen werden" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "Hypervisorinspector %s kann nicht geladen werden" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "Es kann keine erneute Verbindung zur primären mongodb nach %(retries)d " "Versuchen hergestellt werden. Abbruch." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "Es kann keine erneute Verbindung zur primären mongodb hergestellt werden: " "%(errmsg)s. Erneuter Versuch in %(retry_interval)d Sekunden." msgid "Unable to send sample over UDP" msgstr "Beispiel kann nicht über UDP gesendet werden" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " "Datentyp %(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "Unbekannte Erkennungserweiterung: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "Unbekannter Metadatentyp. Schlüssel (%s) wird nicht abfragbar sein."
#, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Loadbalancer %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Listener %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Mitglied %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Pool %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für VIP %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s in VMware vSphere nicht gefunden" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s in XenServer nicht gefunden" msgid "Wrong sensor type" msgstr "Falscher Sensortyp" msgid "XenAPI not installed" msgstr "XenAPI nicht installiert" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "Abruf von CPU-Zeit nicht möglich für %(id)s: %(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "" "Wenn Gnocchi aktiviert ist, kann die Option 'direct' nicht den Wert 'true' " "haben. " #, python-format msgid "dropping out of time order sample: %s" msgstr "" "Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" msgid "ipmitool output length mismatch" msgstr "Abweichung bei ipmitool-Ausgabelänge" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes und backup_count sollten Zahlen sein." #, python-format msgid "message signature invalid, discarding message: %r" msgstr "Nachrichtensignatur ungültig, Nachricht wird verworfen: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " "Eingabe abgerufen" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" msgid "running ipmitool failure" msgstr "Fehler beim Ausführen von ipmitool" ceilometer-6.0.0/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001211612701406223027146 0ustar jenkinsjenkins00000000000000# Monika Wolf , 2016.
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-04 20:29+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-03 03:37+0000\n" "Last-Translator: Monika Wolf \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "Inspector %(name)s kann nicht geladen werden: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "" "Die Verwendung des residenten Speichers für %(id)s konnte nicht abgerufen " "werden: %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "" "Dispatcher konnte %s nicht verarbeiten. Erneut in Warteschlange stellen." msgid "Error connecting to coordination backend." msgstr "Fehler beim Herstellen einer Verbindung zum Koordinierungs-Back-End." msgid "Error getting group membership info from coordination backend." msgstr "" "Fehler beim Abrufen von Mitgliedschaftsinformationen vom Koordinierungs-Back-" "End." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "" "Fehler beim Beitreten zur Partitionierungsgruppe %s. Operation wird " "wiederholt." #, python-format msgid "Error loading meter definition : %(err)s" msgstr "Fehler beim Laden der Messdefinition : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "Fehler beim Verarbeiten des Ereignisses und es wird gelöscht: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "" "Fehler beim Senden eines Überwachungssignals an das Koordinierungs-Back-End." msgid "Fail to process a notification" msgstr "Eine Benachrichtigung konnte nicht verarbeitet werden." msgid "Fail to process notification" msgstr "Benachrichtigung konnte nicht verarbeitet werden." msgid "Failed to connect to Gnocchi." msgstr "Fehler beim Herstellen einer Verbindung zu Gnocchi." #, python-format msgid "Failed to connect to Kafka service: %s" msgstr "Fehler beim Herstellen einer Verbindung zum Kafka-Service: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "" "Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " "Später erneut versuchen: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "" "Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " "Später erneut versuchen: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "Fehler beim Laden der Ressource aufgrund des folgenden Fehlers %s" #, python-format msgid "Failed to record event: %s" msgstr "Das Ereignis konnte nicht aufgezeichnet werden: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "Messdaten wurden nicht aufgezeichnet: %s" #, fuzzy msgid "Failed to retry to send sample data with max_retry times" msgstr "" "Fehler bei dem Wiederholungsversuch, Beispieldaten mit max_retry times zu " "senden." #, fuzzy msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "Gruppen-ID: %{group_id}s, Mitglieder: %{members}s, Ich: %{me}s: Der aktuelle " "Agent ist nicht Teil der Gruppe und kann keine Tasks übernehmen."
#, python-format msgid "Invalid type %s specified" msgstr "Ungültigen Typ %s angegeben" #, python-format msgid "Missing field %s" msgstr "Fehlendes Feld %s" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "" "Das übergebene Ressourcenwörterverzeichnis muss die Schlüssel für " "resource_id und resource_url enthalten." #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Erforderliches Feld %(field)s muss %(type)s sein." #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben." #, python-format msgid "Required fields %s not specified" msgstr "Erforderliche Felder %s nicht angegeben." #, python-format msgid "Skip invalid resource %s" msgstr "Ungültige Ressource %s überspringen" #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "%(name)s wird übersprungen, Keystone-Problem: %(exc)s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "Statuscode: %{code}s. Fehler beim Versenden des Ereignisses: %{event}s" #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "Die geänderte Ereignispipeline konnte nicht geladen werden: %s" #, python-format msgid "Unable to load changed pipeline: %s" msgstr "Die geänderte Pipeline konnte nicht geladen werden: %s" #, python-format msgid "Unrecognized type value %s" msgstr "Nicht erkannter Typwert %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "Inspector-Aufruf fehlgeschlagen für %(ident)s Host %(host)s: %(err)s" ceilometer-6.0.0/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001037612701406223026756 0ustar jenkinsjenkins00000000000000# Monika Wolf , 2016. #zanata # Robert Simai , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev21\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-08 04:34+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-07 04:27+0000\n" "Last-Translator: Robert Simai \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%d events are removed from database" msgstr "%d Ereignisse aus Datenbank entfernt" #, python-format msgid "%d samples removed from database" msgstr "%d Beispiele aus Datenbank entfernt" msgid "Configuration:" msgstr "Konfiguration:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Verbindung mit %(db)s auf %(nodelist)s wird hergestellt" msgid "Coordination backend started successfully." msgstr "Das Koordinierungs-Back-End wurde erfolgreich gestartet." #, python-format msgid "Definitions: %s" msgstr "Definitionen: %s" msgid "Detected change in pipeline configuration." msgstr "Es wurde eine Änderung in der Pipelinekonfiguration festgestellt." #, python-format msgid "Dropping event data with TTL %d" msgstr "Löschen von Ereignisdaten mit TTL %d" #, python-format msgid "Dropping metering data with TTL %d" msgstr "Löschen von Messdaten mit TTL %d" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "Doppeltes Ereignis erkannt. Wird übersprungen: %s" msgid "Expired residual resource and meter definition data" msgstr "Abgelaufene Daten für residente Ressource und für Messdefinition" #, python-format msgid "Index %s will be recreate." msgstr "Index %s wird erneut erstellt.
" #, python-format msgid "Joined partitioning group %s" msgstr "Partitionierungsgruppe %s beigetreten." #, python-format msgid "Left partitioning group %s" msgstr "Partitionierungsgruppe %s verlassen." #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "" "Es wurde kein Grenzwert angegeben. Der Ergebnissatz wird auf %(limit)d " "beschränkt." msgid "Nothing to clean, database event time to live is disabled" msgstr "" "Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankereignisdaten ist " "deaktiviert." msgid "Nothing to clean, database metering time to live is disabled" msgstr "" "Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankstichprobendaten " "ist deaktiviert." #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "Pipeline %(pipeline)s: Konfiguration von Transformerinstanz %(name)s mit " "Parameter %(param)s" #, python-format msgid "Pipeline config: %s" msgstr "Pipelinekonfiguration: %s" msgid "Pipeline configuration file has been updated." msgstr "Die Pipelinekonfigurationsdatei wurde aktualisiert." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "Abfrage von Pollster %(poll)s im Kontext von %(src)s" #, python-format msgid "Publishing policy set to %s" msgstr "Veröffentlichungsrichtlinie auf %s gesetzt" msgid "Reconfiguring polling tasks." msgstr "Polling-Tasks werden neu konfiguriert." msgid "Reloading notification agent and listeners." msgstr "Benachrichtigungsagent und Listener werden erneut geladen." #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "" "Pollster %(name)s ĂĽberspringen, keine %(p_context)sressourcen in diesem " "Zyklus gefunden." #, python-format msgid "Starting server in PID %s" msgstr "Starten von Server in PID %s" msgid "detected decoupled pipeline config format" msgstr "entkoppeltes Pipeline-Konfigurationsformat erkannt" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "" "Messung von Daten %(counter_name)s fĂĽr %(resource_id)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "" "Bereitstellung auf 0.0.0.0:%(sport)s, Ansicht unter http://127.0.0.1:" "%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "Bereitstellung auf http://%(host)s:%(port)s" ceilometer-6.0.0/ceilometer/locale/fr/0000775000567000056710000000000012701406364021007 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/fr/LC_MESSAGES/0000775000567000056710000000000012701406364022574 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po0000664000567000056710000004215112701406223025261 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Corinne Verheyde , 2013 # CHABERT Loic , 2013 # Christophe kryskool , 2013 # Corinne Verheyde , 2013-2014 # EVEILLARD , 2013-2014 # Francesco Vollero , 2015 # Jonathan Dupart , 2014 # CHABERT Loic , 2013 # Maxime COQUEREL , 2014 # Nick Barcet , 2013 # Nick Barcet , 2013 # Andrew Melim , 2014 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # RĂ©mi Le Trocquer , 2014 # EVEILLARD , 2013 # Corinne Verheyde , 2013 # Corinne Verheyde , 2013 # OpenStack Infra , 2015. 
# Tom Cocozzello , 2015. #zanata
# Corinne Verheyde , 2016. #zanata
# Tom Cocozzello , 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.0.0b4.dev34\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-03-09 20:26+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-03-09 05:03+0000\n"
"Last-Translator: Corinne Verheyde \n"
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: French\n"

#, python-format
msgid "%(entity)s %(id)s Not Found"
msgstr "%(entity)s %(id)s n'a pas été trouvé"

#, python-format
msgid "Arithmetic transformer must use at least one meter in expression '%s'"
msgstr ""
"Le transformateur arithmétique doit utiliser au moins un mètre dans "
"l'expression '%s'"

#, python-format
msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
msgstr ""
"Impossible de créer la table %(table_name)s car elle existe déjà. Erreur "
"ignorée"

#, python-format
msgid "Continue after error from %(name)s: %(error)s"
msgstr "Continue après l'erreur %(name)s: %(error)s "

#, python-format
msgid "Could not connect slave host: %s "
msgstr "Impossible de se connecter à l'hôte slave: %s "

#, python-format
msgid "Could not connect to XenAPI: %s"
msgstr "Connexion impossible XenAPI: %s"

#, python-format
msgid "Could not get CPU Util for %(id)s: %(e)s"
msgstr "Ne peut pas recevoir l'utilisation CPU pour %(id)s: %(e)s"

#, python-format
msgid "Could not get Memory Usage for %(id)s: %(e)s"
msgstr ""
"Impossible de récupérer l'utilisation de la mémoire pour %(id)s : %(e)s"

#, python-format
msgid "Could not get VM %s CPU Utilization"
msgstr "Impossible d'obtenir l'utilisation CPU de la VM %s"

#, python-format
msgid "Couldn't obtain IP address of instance %s"
msgstr "Impossible d'obtenir l'adresse IP de l'instance %s"

msgid ""
"Dispatcher target was not set, no meter will be posted. Set the target in "
"the ceilometer.conf file"
msgstr ""
"La cible du répartiteur n'était pas définie, aucun compteur ne sera envoyé. "
"Définissez la cible dans le fichier ceilometer.conf"

#, python-format
msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)"

#, python-format
msgid ""
"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] %(ex)s"
msgstr ""
"Erreur de libvirt lors de la recherche de l'instance : [Code d'erreur %(error_code)s] %(ex)s"

#, python-format
msgid "Error parsing HTTP response: %s"
msgstr "Erreur lors de l'analyse syntaxique de la réponse: %s"

msgid "Error stopping pollster."
msgstr "Erreur lors de l'arrêt du sondeur."

msgid "Event"
msgstr "Événement"

msgid "Expression evaluated to a NaN value!"
msgstr "Expression évaluée avec une valeur not-a-number !"

#, python-format
msgid "Failed to import extension for %(name)s: %(error)s"
msgstr "Echec de l'importation de l'extension pour %(name)s: %(error)s"

#, python-format
msgid ""
"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
"is SHUTOFF."
msgstr ""
"Echec de l'inspection des données de l'instance . "
"Le domaine est à l'état SHUTOFF (INTERRUPTION)."

#, python-format
msgid ""
"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
"libvirt: %(error)s"
msgstr ""
"Echec de l'inspection de l'utilisation de la mémoire de %(instance_uuid)s. "
"Impossible d'obtenir des informations de libvirt : %(error)s"

#, python-format
msgid ""
"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
"not get info from libvirt."
msgstr ""
"Echec de l'inspection de l'utilisation de la mémoire de l'instance . Impossible d'obtenir des informations de libvirt."

#, python-format
msgid "Failed to load any notification handlers for %s"
msgstr "Échec du chargement de tous les gestionnaires de notification pour %s"

#, python-format
msgid "Failed to parse the timestamp value %s"
msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s"

#, python-format
msgid "Failed to publish %d datapoints, dropping them"
msgstr "Echec de la publication des points de données %d. Suppression en cours"

#, python-format
msgid "Failed to publish %d datapoints, queue them"
msgstr ""
"Echec de la publication des points de données %d. Mettez-les en file "
"d'attente"

#, python-format
msgid "Failed to record metering data: %s"
msgstr "Impossible d'enregistrer les données de mesure: %s"

#, python-format
msgid "Filter expression not valid: %s"
msgstr "Filtre de l'expression n'est pas valide: %s"

#, python-format
msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
msgstr "L'instance %(name)s est ignorée (%(instance_id)s) : %(error)s"

#, python-format
msgid "Ignoring instance %(name)s: %(error)s"
msgstr "instance %(name)s: %(error)s ignoré"

#, python-format
msgid "Ignoring loadbalancer %(loadbalancer_id)s"
msgstr "Loadbalancer %(loadbalancer_id)s ignoré"

#, python-format
msgid "Ignoring pool %(pool_id)s"
msgstr "Pool %(pool_id)s ignoré"

#, python-format
msgid ""
"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
"%(column)s."
msgstr ""
"Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : "
"%(line)s, colonne : %(column)s."

#, python-format
msgid "Invalid period %(period)s: %(err)s"
msgstr "Période %(period)s non valide : %(err)s"

#, python-format
msgid "Invalid trait type '%(type)s' for trait %(trait)s"
msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s"

msgid "Limit must be positive"
msgstr "La limite doit être positive"

#, python-format
msgid "More than one event with id %s returned from storage driver"
msgstr ""
"Plus d'un événement avec l'identificateur %s a été renvoyé à partir du "
"pilote de stockage"

#, python-format
msgid "Multiple VM %s found in XenServer"
msgstr "Plusieurs machines virtuelles %s trouvées dans XenServer"

msgid "Must specify connection_url, and connection_password to use"
msgstr "Il faut indiquer connection_url et connection_password pour utiliser"

#, python-format
msgid "No plugin named %(plugin)s available for %(name)s"
msgstr "Aucun plugin nommé %(plugin)s n'est disponible pour %(name)s"

msgid "Node Manager init failed"
msgstr "Echec de l'initialisation du gestionnaire de noeud"

#, python-format
msgid "Not Authorized to access %(aspect)s %(id)s"
msgstr "Non autorisé à accéder %(aspect)s %(id)s "

#, python-format
msgid "OpenDaylitght API returned %(status)s %(reason)s"
msgstr "L'API OpenDaylight a renvoyé %(status)s %(reason)s"

#, python-format
msgid "Opencontrail API returned %(status)s %(reason)s"
msgstr "L'API Opencontrail a renvoyé %(status)s %(reason)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. Only equality operator is available "
"for field %(field)s"
msgstr ""
"Opérateur %(operator)s non supporté. Seul l'opérateur égalité est disponible "
"pour le champ %(field)s"

#, python-format
msgid ""
"Operator %(operator)s is not supported. The supported operators are: "
"%(supported)s"
msgstr ""
"L'opérateur %(operator)s n'est pas supporté. Les opérateurs supportés sont: "
"%(supported)s"

#, python-format
msgid "Order-by expression not valid: %s"
msgstr "L'expression de tri n'est pas valide : %s"

#, python-format
msgid ""
"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
msgstr ""
"Erreur d'analyse dans la spécification JSONPath '%(jsonpath)s' pour "
"%(name)s : %(err)s"

msgid "Period must be positive."
msgstr "La période doit être positive."

#, python-format
msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
msgstr ""
"Pipeline %(pipeline)s : statut %(status)s après erreur du diffuseur %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
msgstr "Pipeline %(pipeline)s: Reprise après une erreur de l'éditeur %(pub)s"

#, python-format
msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
msgstr "Pipeline %(pipeline)s: Erreur à la purge du transformateur %(trans)s"

#, python-format
msgid ""
"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
"%(smp)s"
msgstr ""
"Pipeline %(pipeline)s: Sortie après erreur du transformateur %(trans)s pour "
"%(smp)s"

#, python-format
msgid "Plugin specified, but no plugin name supplied for %s"
msgstr "Plugin spécifié, mais aucun nom de plugin n'est fourni pour %s"

#, python-format
msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !"

#, python-format
msgid "Polling %(name)s failed for %(cnt)s times!"
msgstr "Sondage de %(name)s %(cnt)s fois en échec!"

#, python-format
msgid "Pollster for %s is disabled!"
msgstr "Le pollster pour %s est désactivé !"

#, python-format
msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
msgstr "Empêcher le pollster %(name)s d'interroger la source %(source)s !"

#, python-format
msgid ""
"Publisher max local_queue length is exceeded, dropping %d oldest samples"
msgstr ""
"La longueur maximale de local_queue du diffuseur est dépassée, suppression "
"des %d échantillons les plus anciens"

#, python-format
msgid "Publishing policy is unknown (%s) force to default"
msgstr "La politique de publication est inconnue (%s) forcé le défaut"

#, python-format
msgid "RGW AdminOps API returned %(status)s %(reason)s"
msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s"

msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
msgstr ""
"La demande n'a pas réussi à se connecter à OpenDaylight avec l'API REST "
"NorthBound"

#, python-format
msgid "Required field %s not specified"
msgstr "Champ requis %s non spécifiée"

msgid "Resource"
msgstr "Ressource"

msgid "Sample"
msgstr "Echantillon"

msgid "Samples should be included in request body"
msgstr "Des exemples doivent être inclus dans le corps de demande"

#, python-format
msgid "Skip loading extension for %s"
msgstr "Passer le chargement de l'extension pour %s"

#, python-format
msgid "String %s is not a valid isotime"
msgstr "La chaine de caractère %s n'est pas valide isotime"

msgid ""
"The Yaml file that defines mapping between samples and gnocchi resources/"
"metrics"
msgstr ""
"Fichier Yaml qui définit le mappage entre les exemples et les ressources "
"gnocchi /les métriques"

#, python-format
msgid ""
"The data type %(type)s is not supported. The supported data type list is: "
"%(supported)s"
msgstr ""
"Le type de données %(type)s n'est pas supporté. Les types de données "
"supportés sont: %(supported)s"

#, python-format
msgid "The field 'fields' is required for %s"
msgstr "Le champ 'fields' est requis pour %s"

msgid "The path for the file publisher is required"
msgstr "Le chemin du éditeur de fichier est obligatoire "

#, python-format
msgid "UDP: Cannot decode data sent by %s"
msgstr "UDP: Impossible de décoder les données envoyées par %s"

msgid "UDP: Unable to store meter"
msgstr "UDP: Impossible de stocker les mesures"

#, python-format
msgid "Unable to connect to the database server: %(errmsg)s."
msgstr "Impossible de se connecter au serveur de base de données : %(errmsg)s."

#, python-format
msgid ""
"Unable to convert the value %(value)s to the expected data type %(type)s."
msgstr ""
"Impossible de convertir la valeur %(value)s vers le type de données attendu "
"%(type)s."

#, python-format
msgid "Unable to discover resources: %s"
msgstr "Impossible de découvrir les ressources: %s"

#, python-format
msgid "Unable to evaluate expression %(expr)s: %(exc)s"
msgstr "Impossible d'évaluer l'expression %(expr)s : %(exc)s"

#, python-format
msgid "Unable to load publisher %s"
msgstr "Impossible de charger l'éditeur %s "

#, python-format
msgid "Unable to load the hypervisor inspector: %s"
msgstr "Impossible de télécharger l'inspecteur hypervisor: %s"

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
"up."
msgstr ""
"Impossible de se reconnecter au serveur mongodb principal après %(retries)d "
"tentatives. Abandon."

#, python-format
msgid ""
"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
"%(retry_interval)d seconds."
msgstr ""
"Impossible de se reconnecter au serveur mongodb principal : %(errmsg)s. "
"Nouvelle tentative dans %(retry_interval)d secondes."

msgid "Unable to send sample over UDP"
msgstr "Impossible d'envoyer l'échantillon en UDP"

#, python-format
msgid ""
"Unexpected exception converting %(value)s to the expected data type %(type)s."
msgstr ""
"Exception inattendue lors de la conversion de %(value)s dans le type de "
"donnée attendue %(type)s."

#, python-format
msgid "Unknown discovery extension: %s"
msgstr "Découverte d'une extension inconnue: %s"

#, python-format
msgid "Unknown metadata type. Key (%s) will not be queryable."
msgstr "Type de métadonnées inconnu, la clé (%s) n'est pas requêtable"

#, python-format
msgid ""
"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
msgstr ""
"Statut %(stat)s inconnu reçu sur le Load Balancer %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
msgstr "Etat %(stat)s inconnu reçu sur le listener %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
msgstr "Etat %(stat)s inconnu reçu sur le membre %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
msgstr "Etat %(stat)s inconnu reçu sur le pool %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
msgstr ""
"Etat %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, échantillon ignoré"

#, python-format
msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, échantillon ignoré"

#, python-format
msgid "VM %s not found in VMware vSphere"
msgstr "La machine virtuelle %s est introuvable dans VMware vSphere"

#, python-format
msgid "VM %s not found in XenServer"
msgstr "VM %s non trouvé dans XenServer"

msgid "Wrong sensor type"
msgstr "Type de détecteur incorrect"

msgid "XenAPI not installed"
msgstr "XenAPI n'est pas installé"

#, python-format
msgid "YAML error reading Definitions file %(file)s"
msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s"

#, python-format
msgid "could not get CPU time for %(id)s: %(e)s"
msgstr "impossible d'obtenir le temps UC pour %(id)s : %(e)s"

msgid "direct option cannot be true when Gnocchi is enabled."
msgstr "L'option directe ne peut pas être à vrai si Gnocchi est activé."

#, python-format
msgid "dropping out of time order sample: %s"
msgstr "suppression de l'exemple de classement dans le temps : %s"

#, python-format
msgid "dropping sample with no predecessor: %s"
msgstr "abandon de l'échantillon sans prédécesseur: %s"

msgid "ipmitool output length mismatch"
msgstr "Non-concordance de longueur de la sortie ipmitool"

msgid "max_bytes and backup_count should be numbers."
msgstr "max_bytes et backup_count doivent etre des chiffres."

#, python-format
msgid "message signature invalid, discarding message: %r"
msgstr "signature du message invalide, message ignoré: %r"

msgid "parse IPMI sensor data failed,No data retrieved from given input"
msgstr ""
"Echec de l'analyse des données du détecteur IPMI, aucune donnée extraite à "
"partir de l'entrée fournie"

msgid "parse IPMI sensor data failed,unknown sensor type"
msgstr ""
"Echec de l'analyse des données du détecteur IPMI, type de détecteur inconnu"

msgid "running ipmitool failure"
msgstr "Echec d'exécution d'ipmitool"
ceilometer-6.0.0/ceilometer/locale/ceilometer-log-warning.pot0000664000567000056710000001050212701406223025466 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer.
# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the ceilometer project.
# FIRST AUTHOR , 2016.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: ceilometer 6.0.0.0b4.dev34\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2016-03-11 06:17+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.2.0\n"

#: ceilometer/coordination.py:186
msgid ""
"Cannot extract tasks because agent failed to join group properly. "
"Rejoining group."
msgstr ""

#: ceilometer/notification.py:194
msgid ""
"Non-metric meters may be collected. It is highly advisable to disable "
"these meters using ceilometer.conf or the pipeline.yaml"
msgstr ""

#: ceilometer/pipeline.py:582
#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no"
" volume (volume: None), the sample will be dropped"
msgstr ""

#: ceilometer/pipeline.py:595
#, python-format
msgid ""
"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has "
"volume which is not a number (volume: %(counter_volume)s), the sample "
"will be dropped"
msgstr ""

#: ceilometer/agent/manager.py:452
#, python-format
msgid "Skipping %(name)s, %(service_type)s service is not registered in keystone"
msgstr ""

#: ceilometer/agent/discovery/endpoint.py:44
#, python-format
msgid "No endpoints found for service %s"
msgstr ""

#: ceilometer/api/app.py:75
msgid ""
"pecan_debug cannot be enabled, if workers is > 1, the value is overrided "
"with False"
msgstr ""

#: ceilometer/api/controllers/v2/root.py:123
msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later"
msgstr ""

#: ceilometer/api/controllers/v2/root.py:127
msgid ""
"ceilometer-api started with gnocchi enabled. The resources/meters/samples"
" URLs are disabled."
msgstr ""

#: ceilometer/api/controllers/v2/root.py:149
msgid "Can't connect to keystone, assuming aodh is disabled and retry later."
msgstr ""

#: ceilometer/api/controllers/v2/root.py:152
msgid ""
"ceilometer-api started with aodh enabled. Alarms URLs will be redirected "
"to aodh endpoint."
msgstr "" #: ceilometer/cmd/polling.py:55 #, python-format msgid "Duplicated values: %s found in CLI options, auto de-duplicated" msgstr "" #: ceilometer/compute/pollsters/memory.py:56 #: ceilometer/compute/pollsters/memory.py:99 #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal " "reason: %(exc)s" msgstr "" #: ceilometer/dispatcher/__init__.py:68 #, python-format msgid "Failed to load any dispatchers for %s" msgstr "" #: ceilometer/dispatcher/database.py:102 #, python-format msgid "message signature invalid, discarding message: %r" msgstr "" #: ceilometer/dispatcher/database.py:131 ceilometer/dispatcher/http.py:130 #, python-format msgid "event signature invalid, discarding event: %s" msgstr "" #: ceilometer/dispatcher/gnocchi.py:215 #, python-format msgid "unable to configure oslo_cache: %s" msgstr "" #: ceilometer/event/trait_plugins.py:126 #, python-format msgid "" "split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\" to your jsonpath instead" msgstr "" #: ceilometer/event/trait_plugins.py:216 msgid "" "Timedelta plugin is required two timestamp fields to create timedelta " "value." msgstr "" #: ceilometer/event/trait_plugins.py:224 #, python-format msgid "" "Failed to parse date from set fields, both fields %(start)s and %(end)s " "must be datetime: %(err)s" msgstr "" #: ceilometer/hardware/pollsters/generic.py:65 #, python-format msgid "Ignore unrecognized field %s" msgstr "" #: ceilometer/meter/notifications.py:189 #, python-format msgid "Skipping duplicate meter definition %s" msgstr "" #: ceilometer/network/floatingip.py:66 #, python-format msgid "Invalid status, skipping IP address %s" msgstr "" #: ceilometer/transformer/conversions.py:88 #, python-format msgid "Dropping out of time order sample: %s" msgstr "" #: ceilometer/transformer/conversions.py:94 msgid "Negative delta detected, dropping value" msgstr "" #: ceilometer/transformer/conversions.py:100 #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "" ceilometer-6.0.0/ceilometer/locale/zh_TW/0000775000567000056710000000000012701406364021433 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000012701406364023220 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po0000664000567000056710000003550112701406223025706 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Stefano Maffulli , 2013 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Jennifer , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev50\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-19 00:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-15 07:46+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "ć‰ľä¸Ťĺ° %(entity)s %(id)s" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "在表示式 '%s' 中,算術轉換器必é č‡łĺ°‘使用一種č¨é‡Ź" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "無法建立表格 %(table_name)s,該表格已經ĺ­ĺś¨ă€‚將忽略錯誤" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "在 %(name)s 傳回錯誤 %(error)s 後繼續" #, python-format msgid "Could not connect slave host: %s " msgstr "無法連接附屬主機:%s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "無法連接 XenAPI:%s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "無法取得 %(id)s çš„ CPU 使用率:%(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "無法取得 %(id)s çš„č¨ć†¶é«”用量:%(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "無法取得 VM %s CPU 使用率" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "無法取得實例 %s çš„ IP 位址" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "未設定ĺ†ć´ľĺ™¨ç›®ć¨™ďĽŚĺ°‡ä¸Ťĺ…¬ä˝ä»»ä˝•č¨é‡Źă€‚請在ceilometer.conf 檔中設定目標。" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在捨棄通知 %(type)sďĽUUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查閱實例 <ĺŤç¨±=%(name)s,ID=%(id)s> 時,libvirt 中發生錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "ĺ‰–ćž HTTP 回應時發生錯誤:%s" msgid "Error stopping pollster." msgstr "ĺść­˘ pollster 時發生錯誤。" msgid "Event" msgstr "事件" msgid "Expression evaluated to a NaN value!" msgstr "表示式已求值為非數字值ďĽ" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "無法匯入 %(name)s 的延伸:%(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "無法檢查實例 <ĺŤç¨±=%(name)s,ID=%(id)s> 的資料,網域狀態為 SHUTOFF。" #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "無法檢查 %(instance_uuid)s çš„č¨ć†¶é«”用量,無法從 libVirt 取得資訊:%(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." 
msgstr "" "無法檢查實例 <ĺŤç¨±=%(name)s,ID=%(id)s> çš„č¨ć†¶é«”用量,無法從 libVirt 取得資" "訊。" #, python-format msgid "Failed to load any notification handlers for %s" msgstr "無法載入 %s 的任何通知處ç†ç¨‹ĺĽŹ" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "無法剖ćžć™‚é–“ćłč¨ĺ€Ľ %s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "ç„ˇćł•ç™Ľä˝ %d 個資料點,正在捨棄ĺ®ĺ€‘" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "ç„ˇćł•ç™Ľä˝ %d 個資料點,正在將ĺ®ĺ€‘排入佇ĺ—" #, python-format msgid "Failed to record metering data: %s" msgstr "無法č¨éŚ„č¨é‡Źčł‡ć–™ďĽš%s" #, python-format msgid "Filter expression not valid: %s" msgstr "éŽćżľčˇ¨ç¤şĺĽŹç„ˇć•:%s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "正在忽略實例 %(name)s (%(instance_id)s):%(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "正在忽略實例 %(name)s:%(error)s" #, python-format msgid "Ignoring loadbalancer %(loadbalancer_id)s" msgstr "正在忽略負載平衡器 %(loadbalancer_id)s" #, python-format msgid "Ignoring pool %(pool_id)s" msgstr "正在忽略儲ĺ­ĺŤ€ %(pool_id)s" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s ĺ—中的 YAML 語法無ć•。" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "ćśźé–“ %(period)s 無ć•:%(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特徵 %(trait)s 的特徵類型 '%(type)s' 無ć•" msgid "Limit must be positive" msgstr "é™ĺ¶ĺ€Ľĺż…é ćŻć­Łć•¸" #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "從儲ĺ­é«”驅動程式傳回了多個 ID 為 %s 的事件" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "在 XenServer 中找ĺ°ĺ¤šĺ€‹ VM %s" msgid "Must specify connection_url, and connection_password to use" msgstr "ĺż…é ćŚ‡ĺ®š connection_url ĺ’Ś connection_password,才č˝ä˝żç”¨" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "沒有ĺŤç‚ş %(plugin)s 的外掛程式可供 %(name)s 使用" msgid "Node Manager init failed" msgstr "節點管ç†ç¨‹ĺĽŹčµ·ĺ§‹č¨­ĺ®šĺ¤±ć•—" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "未獲ćŽć¬Šäľ†ĺ­ĺŹ– %(aspect)s %(id)s" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylight API 傳回了 %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API 傳回了 %(status)s %(reason)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only equality operator is available " "for field %(field)s" msgstr "é‹ç®—ĺ­ %(operator)s 不受支援。只有等式é‹ç®—ĺ­ć‰ŤĺŹŻäľ›ć¬„ä˝Ť %(field)s 使用" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "é‹ç®—ĺ­ %(operator)s 不受支援。受支援的é‹ç®—ĺ­ç‚şďĽš%(supported)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "排序方式表示式無ć•:%s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "%(name)s çš„ JSONPath 規格 '%(jsonpath)s' 中發生剖ćžéŚŻčŞ¤ďĽš%(err)s" msgid "Period must be positive." 
msgstr "ćśźé–“ĺż…é ćŻć­Łć•¸ă€‚" #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "管線 %(pipeline)s:在發ä˝č€… %(pub)s 傳回錯誤後處於%(status)s狀態" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "管線 %(pipeline)s:在發ä˝č€… %(pub)s 傳回錯誤後繼續" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "管線 %(pipeline)s:清除轉換器 %(trans)s 時發生錯誤" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "管線 %(pipeline)s:%(smp)s 的轉換器 %(trans)s傳回錯誤後çµćťź" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "ĺ·˛ćŚ‡ĺ®šĺ¤–ćŽ›ç¨‹ĺĽŹďĽŚä˝†ĺŤ»ćśŞĺ‘ %s ćŹäľ›ĺ¤–掛程式ĺŤç¨±" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次ďĽ" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "輪詢 %(name)s 失敗了 %(cnt)s 次ďĽ" #, python-format msgid "Pollster for %s is disabled!" msgstr "ĺ·˛ĺśç”¨ %s çš„ PollsterďĽ" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "é»ć­˘ pollster %(name)s ĺ†Ťć¬ˇčĽŞč©˘čł‡ćş %(source)sďĽ" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "已超出發ä˝č€… local_queue 長度上é™ďĽŚć­Łĺś¨ćŤ¨ćŁ„ %d 個最čŠçš„樣本" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "發ä˝ĺŽźĺ‰‡ä¸ŤćŽ (%s),強ĺ¶č¨­ç‚şé č¨­ĺ€Ľ" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "č¦ć±‚無法使用 NorthBound REST API 來連接至 OpenDaylight" #, python-format msgid "Required field %s not specified" msgstr "未指定必č¦ć¬„位 %s" msgid "Resource" msgstr "資ćş" msgid "Sample" msgstr "樣本" msgid "Samples should be included in request body" msgstr "č¦ć±‚內文中應該包括範例" #, python-format msgid "Skip loading extension for %s" msgstr "č·łéŽčĽ‰ĺ…Ą %s 的延伸" #, python-format msgid "String %s is not a valid isotime" msgstr "字串 %s 不ćŻćś‰ć•çš„ ISO 時間" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "ç”¨äľ†ĺś¨çŻ„äľ‹č‡ gnocchi 資ćş/度量之間定義對ć çš„Yaml 檔ćˇ" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "不支援資料類型 %(type)s。支援的資料類型清單為:%(supported)s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需č¦ć¬„位「欄位」" msgid "The path for the file publisher is required" msgstr "需č¦ćŞ”ćˇç™Ľä˝č€…的路徑" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr "UDP:無法解碼由 %s 傳é€çš„資料" msgid "UDP: Unable to store meter" msgstr "UDP:無法儲ĺ­č¨é‡Ź" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "無法連接至資料庫伺服器:%(errmsg)s。" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "無法將值 %(value)s 轉換ćé ćśźçš„資料類型 %(type)s。" #, python-format msgid "Unable to discover resources: %s" msgstr "無法探索資ćşďĽš%s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "無法對表示式 %(expr)s 進行求值:%(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "無法載入發ä˝č€… %s" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "無法載入 Hypervisor 檢查程式:%s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." 
msgstr "在 %(retries)d ć¬ˇé‡Ťč©¦äą‹ĺľŚä»Ťç„ˇćł•é‡Ťć–°é€ŁćŽĄč‡łä¸»č¦ MongoDB。正在放棄。" #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "ç„ˇćł•é‡Ťć–°é€ŁćŽĄč‡łä¸»č¦ MongoDB:%(errmsg)s。請在%(retry_interval)d 秒之後再次ĺ—" "試。" msgid "Unable to send sample over UDP" msgstr "ç„ˇćł•é€ŹéŽ UDP 來傳é€ć¨Łćś¬" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "ĺ°‡ %(value)s 轉換為é ćśźçš„資料類型%(type)s 時發生非é ćśźçš„異常狀ćłă€‚" #, python-format msgid "Unknown discovery extension: %s" msgstr "不ćŽçš„探索延伸:%s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "不ćŽçš„ meta 資料類型。索引鍵 (%s) 將不可查詢。" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在負載平衡器 %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "在é˛ç«ç‰† %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在接č˝ĺ™¨ %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在ć員 %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "在儲ĺ­ĺŤ€ %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "在 VIP %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "在 VMware vSphere ä¸­ć‰ľä¸Ťĺ° VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "在 XenServer ä¸­ć‰ľä¸Ťĺ° VM %s" msgid "Wrong sensor type" msgstr "感應器類型錯誤" msgid "XenAPI not installed" msgstr "未安裝 XenAPI" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "讀取定義檔 %(file)s 時發生 YAML 錯誤" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "無法取得 %(id)s çš„ CPU 時間:%(e)s" msgid "direct option cannot be true when Gnocchi is enabled." msgstr "已啟用 Gnocchi 時,直接é¸é …不č˝ç‚ş true。" #, python-format msgid "dropping out of time order sample: %s" msgstr "正在ĺŞé™¤ä¸Ťĺś¨ć™‚間順序內的範例:%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "正在捨棄不ĺ«ĺ‰Ťä¸€ç‰ćś¬çš„樣本:%s" msgid "ipmitool output length mismatch" msgstr "ipmitool 輸出長度不符" msgid "max_bytes and backup_count should be numbers." 
msgstr "max_bytes 及 backup_count 應該ćŻć•¸ĺ­—。" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "訊ćŻç°˝ç« ç„ˇć•,正在捨棄訊ćŻďĽš%r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "ĺ‰–ćž IPMI 感應器資料失敗,未從給定的輸入擷取任何資料" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "ĺ‰–ćž IPMI 感應器資料失敗,感應器類型不ćŽ" msgid "running ipmitool failure" msgstr "執行 ipmitool 失敗" ceilometer-6.0.0/ceilometer/locale/ko_KR/0000775000567000056710000000000012701406364021405 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000012701406364023172 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po0000664000567000056710000001312212701406224030076 0ustar jenkinsjenkins00000000000000# SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-20 20:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 04:18+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "Can't connect to keystone, assuming aodh is disabled and retry later." msgstr "" "keystoneě— ě—°ę˛°í•  ě 없습ë‹ë‹¤ . aodhę°€ 사용ëě§€ 않는다고 가정í•ě—¬ ë‚ě¤‘ě— ë‹¤" "시 시도합ë‹ë‹¤." msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" msgstr "" "keystoneě— ě—°ę˛°í•  ě 없습ë‹ë‹¤. gnocchię°€ 사용ëě§€ 않는다고 가정í•ě—¬ ë‚ě¤‘ě— ë‹¤" "시 시도합ë‹ë‹¤." msgid "" "Cannot extract tasks because agent failed to join group properly. Rejoining " "group." msgstr "" "ě—이전트가 ě ě í•게 그룹을 결합í•ě§€ 못í–으므로 작업을 추출할 ě 없습ë‹ë‹¤. ę·¸" "룹을 다시 결합합ë‹ë‹¤." #, python-format msgid "" "Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " "%(exc)s" msgstr "" "%(instance_id)sěť %(pollster)s 데이터를 검사할 ě 없습ë‹ë‹¤. ěąëŞ…ě ěť´ě§€ 않은 " "ěť´ěś : %(exc)s" #, python-format msgid "Dropping out of time order sample: %s" msgstr "시간 ěśě„ś ě플ě—서 ě‚­ě ś: %s" #, python-format msgid "Dropping sample with no predecessor: %s" msgstr "ě„ í–‰ 작업이 없는 ě플 ě‚­ě ś: %s" #, python-format msgid "Duplicated values: %s found in CLI options, auto de-duplicated" msgstr "ě¤‘ëłµëś ę°’: CLI ěµě…ě— %sěť´(ę°€) ěžěеë‹ë‹¤. ěžëŹ™ěśĽëˇś 중복이 í•´ě śë©ë‹ë‹¤." #, python-format msgid "Failed to load any dispatchers for %s" msgstr "%sěť ë””ěŠ¤íŚ¨ě˛ëĄĽ 로드í•는 데 실패" #, python-format msgid "" "Failed to parse date from set fields, both fields %(start)s and %(end)s must " "be datetime: %(err)s" msgstr "" "설정 필드ě—서 데이터를 구문 분석í•는 데 실패, ë‘ í•„ë“ś %(start)s ë°Ź %(end)s은" "(는) ëŞ¨ë‘ datetimeěž„: %(err)s" #, python-format msgid "Ignore unrecognized field %s" msgstr "인식ëě§€ 않는 필드 %s 무시" #, python-format msgid "Invalid status, skipping IP address %s" msgstr "ě¬ë°”르지 않은 ěíś, IP 주소 %s ę±´ë„뛰기" msgid "Negative delta detected, dropping value" msgstr "음ěěť ëŤ¸í€ę°€ 발견ëě–´ ę°’ěť„ 삭제함" #, python-format msgid "No endpoints found for service %s" msgstr "%s ě„śëą„ěŠ¤ěť ě—”ë“śíŹ¬ěť¸íŠ¸ëĄĽ ě°ľěť„ ě 없음" msgid "" "Non-metric meters may be collected. It is highly advisable to disable these " "meters using ceilometer.conf or the pipeline.yaml" msgstr "" "비측정 미터를 ěě§‘í•  ě 없습ë‹ë‹¤. celometer.conf ë는 pipeline.yamlěť„ 사용í•" "ě—¬ 이러한 미터를 사용í•ě§€ 않게 설정í•는 ę˛ěť´ 좋습ë‹ë‹¤." 
#, python-format msgid "" "Skipping %(name)s, %(service_type)s service is not registered in keystone" msgstr " %(name)s, %(service_type)s 서비스 ę±´ë„뛰기는 keystoneě— ë“±ëˇťëě§€ 않음" #, python-format msgid "Skipping duplicate meter definition %s" msgstr "중복 미터 ě •ěť %s ę±´ë„뛰기" msgid "" "Timedelta plugin is required two timestamp fields to create timedelta value." msgstr "" "Timedelta 플러그인ě—서 timedelta ę°’ěť„ ěťě„±í•려면 ë‘ ę°śěť ě‹śę°„ě†Śěť¸ 필드가 í•„ěš”" "í•©ë‹ë‹¤." msgid "" "ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " "aodh endpoint." msgstr "" "aodhę°€ ě‚¬ěš©ëś ěíśëˇś ceilometer-apię°€ 시작ëě—습ë‹ë‹¤. 알람 URLěť´ aodh 엔드포" "인트로 경로가 재지정ë©ë‹ë‹¤." msgid "" "ceilometer-api started with gnocchi enabled. The resources/meters/samples " "URLs are disabled." msgstr "" "gnocchi를 사용한 ěíśëˇś ceilometer-apię°€ 시작ëě—습ë‹ë‹¤. ěžě›/미터/ě플 URL" "ěť„ 사용í•ě§€ 않습ë‹ë‹¤." #, python-format msgid "event signature invalid, discarding event: %s" msgstr "이벤트 서명이 ě¬ë°”르지 않아 이벤트를 삭제함: %s" #, python-format msgid "message signature invalid, discarding message: %r" msgstr "ě¬ë°”르지 않은 메시지 서명. 메시지 버리는 중: %r" #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " "volume (volume: None), the sample will be dropped" msgstr "" "%(resource_id)s @ %(timestamp)sěť ě¸ˇě • 데이터 %(counter_name)sě— ëłĽëĄ¨" "(volume: None)ěť´ 없으므로 ě플이 ě‚­ě śë©ë‹ë‹¤." #, python-format msgid "" "metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " "volume which is not a number (volume: %(counter_volume)s), the sample will " "be dropped" msgstr "" "%(resource_id)s @ %(timestamp)sěť ě¸ˇě • 데이터 %(counter_name)sě— ë˛í¸" "(volume: %(counter_volume)s)ę°€ 아닌 볼륨이 ěžěśĽëŻ€ëˇś, ě플이 ě‚­ě śë©ë‹ë‹¤." msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug를 사용í•도록 설정할 ě 없습ë‹ë‹¤. 작업ěžę°€ > 1ěť´ë©´ ę°’ěť´ False로 겹" "ěłě”ë‹ë‹¤." #, python-format msgid "" "split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\" to your jsonpath instead" msgstr "" "ë¶„í•  플러그인은 더 ěť´ě 사용ëě§€ 않음, 대신 \".`split(%(sep)s, %(segment)d, " "%(max_split)d)`\"ěť„(를) jsonpathě— ě¶”ę°€" #, python-format msgid "unable to configure oslo_cache: %s" msgstr "oslo_cache를 구성할 ě 없음: %s" ceilometer-6.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po0000664000567000056710000003311412701406223025656 0ustar jenkinsjenkins00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Seong-ho Cho , 2014 # Seunghyo Chun , 2013 # Seunghyo Chun , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0b4.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-04 20:29+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-02-03 05:50+0000\n" "Last-Translator: Lucas Palm \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)sěť„(를) ě°ľěť„ ě 없음" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "'%s' 표í„식ě—서 ě‚°ě  ëł€í™ę¸°ëŠ” í•ë‚ ěť´ěěť ëŻ¸í„°ëĄĽ 사용해야 함" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "%(table_name)s 테이블을 작성할 ě 없음, 이미 존재합ë‹ë‹¤. ě¤ëĄ 무시" #, python-format msgid "Continue after error from %(name)s: %(error)s" msgstr "%(name)sě—서 ě¤ëĄ 후 계속: %(error)s" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI를 연결할 ě 없음: %s" #, python-format msgid "Could not get CPU Util for %(id)s: %(e)s" msgstr "%(id)sě— ëŚ€í•´ CPU Utilěť„ ę°€ě ¸ě¬ ě 없음: %(e)s" #, python-format msgid "Could not get Memory Usage for %(id)s: %(e)s" msgstr "%(id)sě— ëŚ€í•ś 메모리 사용을 ę°€ě ¸ě¬ ě 없음: %(e)s" #, python-format msgid "Could not get VM %s CPU Utilization" msgstr "VM %s CPU 이용률을 ę°€ě ¸ě¬ ě 없음" #, python-format msgid "Couldn't obtain IP address of instance %s" msgstr "%s ěť¸ěŠ¤í„´ěŠ¤ěť IP 주소를 얻을 ě 없음" msgid "" "Dispatcher target was not set, no meter will be posted. Set the target in " "the ceilometer.conf file" msgstr "" "ë””ěŠ¤íŚ¨ě˛ ëŚ€ěěť„ 설정í•ě§€ 않ě•으며 미터가 게시ëě§€ 않습ë‹ë‹¤. ceilometer.conf " "íŚŚěťĽě— ëŚ€ěěť„ 설정í•십시ě¤." #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "알림 %(type)s ě‚­ě ś 중(uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "인스턴스 ę˛€ě‰ ě¤‘ libvirtě—서 ě¤ëĄ ë°śěť: [ě¤ëĄ ě˝”" "드 %(error_code)s] %(ex)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "HTTP 응답 구문 분석 중 ě¤ëĄ ë°śěť: %s" msgid "Error stopping pollster." msgstr "ěťę˛¬ěˇ°ě‚¬ěžëĄĽ 중지í•는 ě¤‘ě— ě¤ëĄę°€ ë°śěťí–습ë‹ë‹¤. " msgid "Event" msgstr "이벤트" msgid "Expression evaluated to a NaN value!" msgstr "표í„식이 NaN 값으로 평가ëě—습ë‹ë‹¤!" #, python-format msgid "Failed to import extension for %(name)s: %(error)s" msgstr "%(name)s 확장ěžëĄĽ 가져ě¤ëŠ” 데 실패함: %(error)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "인스턴스 <이름=%(name)s, id=%(id)s>ěť ëŤ°ěť´í„° 검사 실패, 도메인 ěíśę°€ SHUTOFF" "ěž…ë‹ë‹¤." #, python-format msgid "" "Failed to inspect memory usage of %(instance_uuid)s, can not get info from " "libvirt: %(error)s" msgstr "" "%(instance_uuid)sěť ë©”ëŞ¨ë¦¬ 사용량 검사 실패, libvirtě—서 정보를 ę°€ě ¸ě¬ ě ě—†" "음: %(error)s" #, python-format msgid "" "Failed to inspect memory usage of instance , can " "not get info from libvirt." msgstr "" "인스턴스 <이름=%(name)s, id=%(id)s>ěť ë©”ëŞ¨ë¦¬ 사용량 검사 실패, libvirtě—서 ě •" "보를 ę°€ě ¸ě¬ ě 없습ë‹ë‹¤." #, python-format msgid "Failed to load any notification handlers for %s" msgstr "%sěť ě•Śë¦Ľ 핸들러 로드 실패" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "시간소인 ę°’ %s 구문 분석 실패" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d 데이터포인트 공개 실패. 
이를 ě‚­ě śí•는 중" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d 데이터포인트 공개 실패. 이를 íě— ëŚ€ę¸°ě‹śí‚´" #, python-format msgid "Failed to record metering data: %s" msgstr "측정 데이터 기록 실패: %s" #, python-format msgid "Filter expression not valid: %s" msgstr "í•„í„° 표í„식이 ě¬ë°”르지 않음: %s" #, python-format msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" msgstr "인스턴스 %(name)s (%(instance_id)s) 무시 중: %(error)s" #, python-format msgid "Ignoring instance %(name)s: %(error)s" msgstr "인스턴스 %(name)s 무시 중: %(error)s" #, python-format msgid "Invalid period %(period)s: %(err)s" msgstr "ě¬ë°”르지 않은 기간 %(period)s: %(err)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "특성 %(trait)sě— ëŚ€í•ś ě¬ë°”르지 않은 특성 ěś í• '%(type)s'" msgid "Limit must be positive" msgstr "제한 값은 ě–‘ě여야 í•©ë‹ë‹¤." #, python-format msgid "More than one event with id %s returned from storage driver" msgstr "IDę°€ %s인 ë‘ ěť´ěěť ěť´ë˛¤íŠ¸ę°€ 스토리지 드라이버ě—서 리턴ë¨" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "여러 VM %sěť„(를) XenServerě—서 찾음 " msgid "Must specify connection_url, and connection_password to use" msgstr "사용할 connection_url ë°Ź connection_password를 지정해야 함 " msgid "Node Manager init failed" msgstr "노드 ę´€ë¦¬ěž ě´ę¸°í™” 실패" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)sě— ëŚ€í•ś 액세스 권한이 부여ëě§€ 않음" #, python-format msgid "OpenDaylitght API returned %(status)s %(reason)s" msgstr "OpenDaylitght APIę°€ %(status)s 리턴: %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail APIę°€ %(status)s 리턴: %(reason)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "Order-by 표í„식이 ě¬ë°”르지 않음: %s" msgid "Period must be positive." msgstr "기간은 ě–‘ě여야 í•©ë‹ë‹¤. " #, python-format msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" msgstr "파이프라인 %(pipeline)s: ęłµę°śěž %(pub)sě—서 ě¤ëĄ 후 %(status)s" #, python-format msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" msgstr "파이프라인 %(pipeline)s: ęłµę°śěž %(pub)sě—서 ě¤ëĄ 후 계속" #, python-format msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" msgstr "파이프라인 %(pipeline)s: 변í™ę¸° %(trans)sěť„(를) 비우는 중 ě¤ëĄ ë°śěť" #, python-format msgid "" "Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " "%(smp)s" msgstr "파이프라인 %(pipeline)s: %(smp)sěť ëł€í™ę¸° %(trans)sě—서 ě¤ëĄ 후 종료" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "íŹ´ë§ %(mtr)s 센서가 %(cnt)së˛ ě‹¤íŚ¨í–습ë‹ë‹¤!" #, python-format msgid "Pollster for %s is disabled!" msgstr "%sěť ěťę˛¬ěˇ°ě‚¬ěžę°€ 사용 ě•함으로 설정ëě–´ ěžěеë‹ë‹¤!" #, python-format msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" msgstr "" "ěťę˛¬ěˇ°ě‚¬ěž %(name)sěť´(ę°€) 소스 %(source)s를 더 ěť´ě 폴ë§í•ě§€ 않도록 í•십시ě¤!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "ęłµę°śěž ěµśëŚ€ local_queue 길이가 ě´ęłĽë¨. %d 가장 ě¤ëžëś ě플 ě‚­ě ś 중" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "공개 ě •ě±…ěť„ 알 ě 없음(%s). 
기본값으로 ę°•ě ś 설정함" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps APIę°€ %(status)s %(reason)sěť„(를) 리턴함" msgid "Request failed to connect to OpenDaylight with NorthBound REST API" msgstr "요청이 NorthBound REST API로 OpenDaylightě— ě—°ę˛°í•는 데 실패함" #, python-format msgid "Required field %s not specified" msgstr "í•„ě 필드 %sěť´(ę°€) 지정ëě§€ 않음" msgid "Resource" msgstr "리소스" msgid "Sample" msgstr "ě플" msgid "Samples should be included in request body" msgstr "ě플이 요청 ëł¸ë¬¸ě— íŹ¬í•¨ë어야 함" #, python-format msgid "Skip loading extension for %s" msgstr "%s í™•ěžĄěž ëˇśë“ś ę±´ë„뛰기" #, python-format msgid "String %s is not a valid isotime" msgstr "문ěžě—´ %sěť´(ę°€) ě¬ë°”른 등시간이 ě•„ë‹" msgid "" "The Yaml file that defines mapping between samples and gnocchi resources/" "metrics" msgstr "ě플과 gnocchi resources/ 메트릭 ę°„ 맵핑을 ě •ěťí•는 Yaml 파일" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "데이터 ěś í• %(type)sěť´(ę°€) ě§€ě›ëě§€ 않습ë‹ë‹¤. ě§€ě›ë는 데이터 ěś í• ëŞ©ëˇťěť€ " "%(supported)sěž…ë‹ë‹¤." msgid "The path for the file publisher is required" msgstr "파일 공개ěžěť 경로가 필요함" #, python-format msgid "UDP: Cannot decode data sent by %s" msgstr " UDP: %sěť´(ę°€) 보낸 데이터를 해독할 ě 없습ë‹ë‹¤" msgid "UDP: Unable to store meter" msgstr "UDP: 측정을 저장할 ě 없습ë‹ë‹¤" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "데이터베이스 ě„śë˛„ě— ě—°ę˛°í•  ě 없음: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "%(value)s ę°’ěť„ ěě 데이터 ěś í• %(type)s(으)로 변í™í•  ě 없습ë‹ë‹¤." #, python-format msgid "Unable to discover resources: %s" msgstr "ěžě›ěť„ 검ě‰í•  ě 없음: %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "%(expr)s 표í„식을 평가할 ě 없음: %(exc)s" #, python-format msgid "Unable to load publisher %s" msgstr "ęłµę°śěž %sěť„(를) 로드할 ě 없음" #, python-format msgid "Unable to load the hypervisor inspector: %s" msgstr "í•이퍼바이저 검사기를 로드할 ě 없음: %s" #, python-format msgid "" "Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " "up." msgstr "" "%(retries)d회 재시도한 이후ě—는 1ě°¨ mongodbě— ë‹¤ě‹ś 연결할 ě 없습ë‹ë‹¤. 포기í•" "는 중입ë‹ë‹¤." #, python-format msgid "" "Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " "%(retry_interval)d seconds." msgstr "" "1ě°¨ mongodbě— ë‹¤ě‹ś 연결할 ě 없음: %(errmsg)s. %(retry_interval)dě´ í›„ě— ë‹¤" "시 시도합ë‹ë‹¤." msgid "Unable to send sample over UDP" msgstr "UDP를 통해 ě플을 전송할 ě 없음" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)sěť„(를) ěěëś ëŤ°ěť´í„° ěś í•으로 변í™í•는 ě¤‘ě— ěěěą ě•Šěť€ ě외 ë°śěť " "%(type)s." #, python-format msgid "Unknown discovery extension: %s" msgstr "알 ě 없는 ę˛€ě‰ í™•ěžĄěž: %s" #, python-format msgid "Unknown metadata type. Key (%s) will not be queryable." msgstr "알 ě 없는 ë©”í€ëŤ°ěť´í„° ěś í•ěž…ë‹ë‹¤. 키(%s)를 조회할 ě 없습ë‹ë‹¤." #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "fw %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "í’€ %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "vip %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. 
ě플을 ę±´ë„뛰는 중" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %sěť„(를) VMware vSphereě—서 ě°ľěť„ ě 없음" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %sěť„(를) XenServerě—서 ě°ľěť„ ě 없음 " msgid "Wrong sensor type" msgstr "ěžëŞ»ëś ě„Ľě„ś ěś í•" msgid "XenAPI not installed" msgstr "XenAPIę°€ 설ěąëě§€ 않음" #, python-format msgid "could not get CPU time for %(id)s: %(e)s" msgstr "%(id)sěť CPU 시간을 ę°€ě ¸ě¬ ě 없음: %(e)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "시간 ěśě„ś ě플ě—서 벗어남: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "ě„ í–‰ 작업이 없는 ě플 ě‚­ě ś: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool ě¶śë Ą 길이 ë¶ěťĽěą" msgid "max_bytes and backup_count should be numbers." msgstr "max_bytes ë°Ź backup_count는 ě«ěžě—¬ě•Ľ í•©ë‹ë‹¤." #, python-format msgid "message signature invalid, discarding message: %r" msgstr "ě¬ë°”르지 않은 메시지 서명. 메시지 버리는 중: %r" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI 센서 데이터 구문 ë¶„ě„ťě— ě‹¤íŚ¨í–음, ě śęłµëś ěž…ë Ąě—서 검ě‰ëś 데이터가 없음" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI 센서 데이터 구문 ë¶„ě„ťě— ě‹¤íŚ¨í–음, 알 ě 없는 센서 ěś í•" msgid "running ipmitool failure" msgstr "ipmitool 실행 실패" ceilometer-6.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po0000664000567000056710000001165412701406224027572 0ustar jenkinsjenkins00000000000000# SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-20 20:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 03:28+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "Cannot load inspector %(name)s: %(err)s" msgstr "%(name)s 검사기를 로드할 ě 없음: %(err)s" #, python-format msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" msgstr "%(id)sěť ě주 메모리 사용을 ę°€ě ¸ě¬ ě 없음 : %(e)s" #, python-format msgid "Dispatcher failed to handle the %s, requeue it." msgstr "디스패ě˛ě—서 %sěť„(를) ě˛ë¦¬í•ě§€ 못í•ě—¬ 다시 ëŚ€ę¸°ě—´ě— ë‘ˇë‹ë‹¤." msgid "Error connecting to coordination backend." msgstr "조정 ë°±ě—”ë“śě— ě—°ę˛°í•는 ě¤‘ě— ě¤ëĄę°€ ë°śěťí–습ë‹ë‹¤." msgid "Error getting group membership info from coordination backend." msgstr "조정 백엔드ě—서 그룹 멤버십 정보를 가져ě¤ëŠ” ě¤‘ě— ě¤ëĄę°€ ë°śěťí–습ë‹ë‹¤." #, python-format msgid "Error joining partitioning group %s, re-trying" msgstr "" "íŚŚí‹°ě… ě§€ě • 그룹 %sěť„(를) 결합í•는 ě¤‘ě— ě¤ëĄę°€ ë°śěťí•ě—¬, 다시 시도 중입ë‹ë‹¤." #, python-format msgid "Error loading meter definition : %(err)s" msgstr "미터 ě •ěť ëˇśë“ś ě¤ëĄ : %(err)s" #, python-format msgid "Error processing event and it will be dropped: %s" msgstr "이벤트 ě˛ë¦¬ 중 ě¤ëĄę°€ ë°śěťí•므로 ě‚­ě śë¨: %s" msgid "Error sending a heartbeat to coordination backend." msgstr "í•트비트를 조정 ë°±ě—”ë“śě— ëł´ë‚´ëŠ” ě¤‘ě— ě¤ëĄę°€ ë°śěťí–습ë‹ë‹¤." msgid "Fail to process a notification" msgstr "알림을 ě˛ë¦¬í•는 데 실패" msgid "Fail to process notification" msgstr "알림을 ě˛ë¦¬í•는 데 실패" msgid "Failed to connect to Gnocchi." msgstr "Gnocchiě— ě—°ę˛°í•ě§€ 못í–습ë‹ë‹¤." 
#, python-format msgid "Failed to connect to Kafka service: %s" msgstr "Kafka ě„śëą„ěŠ¤ě— ě—°ę˛°í•는 데 실패: %s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" msgstr "dbě— ě—°ę˛°í•는 데 실패, %(purpose)s 용도를 ë‚ě¤‘ě— ë‹¤ě‹ś 시도: %(err)s" #, python-format msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" msgstr "dbě— ě—°ę˛°í•는 데 실패, %(purpose)s 용도를 ë‚ě¤‘ě— ë‹¤ě‹ś 시도: %(err)s" #, python-format msgid "Failed to load resource due to error %s" msgstr "%s ě¤ëĄëˇś 인해 ěžě›ěť„ 로드í•는 데 실패" #, python-format msgid "Failed to record event: %s" msgstr "이벤트를 기록í•는 데 실패: %s" #, python-format msgid "Failed to record metering data: %s" msgstr "측정 데이터 기록 실패: %s" msgid "Failed to retry to send sample data with max_retry times" msgstr "ě플 데이터를 max_retry íšźěë§ŚíĽ ëł´ë‚´ëŠ” 데 실패" msgid "" "Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " "not part of group and cannot take tasks" msgstr "" "그룹 ID: %{group_id}s, 멤버: %{members}s, 사용ěž: %{me}s: í„재 ě—이전트가 ę·¸" "ëŁąěť ěťĽë¶€ę°€ ě•„ë‹ëŻ€ëˇś 작업을 ě행할 ě 없음" #, python-format msgid "Invalid type %s specified" msgstr "ě¬ë°”르지 않은 ěś í• %sěť´(ę°€) 지정ë¨" #, python-format msgid "Missing field %s" msgstr "%s 필드 ë„ëť˝" msgid "Passed resource dict must contain keys resource_id and resource_url." msgstr "ě „ë‹¬ëś ěžě› dictě— í‚¤ resource_id와 resource_urlěť´ 포함ë어야 í•©ë‹ë‹¤." #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "í•„ě 필드 %(field)s은(는) %(type)s이어야 함" #, python-format msgid "Required field %s not specified" msgstr "í•„ě 필드 %sěť´(ę°€) 지정ëě§€ 않음" #, python-format msgid "Required fields %s not specified" msgstr "í•„ě 필드 %sěť´(ę°€) 지정ëě§€ 않음" #, python-format msgid "Skip invalid resource %s" msgstr "ě¬ë°”르지 않은 ěžě› %s ę±´ë„뛰기" #, python-format msgid "Skipping %(name)s, keystone issue: %(exc)s" msgstr "%(name)s ę±´ë„뛰기, keystone 문제: %(exc)s" msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" msgstr "ěíś ě˝”ë“ś: %{code}s. 이벤트를 디스패ěąí•는 데 실패: %{event}s" #, python-format msgid "Unable to load changed event pipeline: %s" msgstr "ëł€ę˛˝ëś ěť´ë˛¤íŠ¸ 파이프라인을 로드할 ě 없음: %s" #, python-format msgid "Unable to load changed pipeline: %s" msgstr "ëł€ę˛˝ëś íŚŚěť´í”„ëťĽěť¸ěť„ 로드할 ě 없음: %s" #, python-format msgid "Unrecognized type value %s" msgstr "인식ëě§€ 않은 ěś í• ę°’ %s" #, python-format msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" msgstr "%(ident)s í¸ěŠ¤íŠ¸ %(host)sěť ę˛€ě‚¬ę¸° í¸ě¶śě— 실패: %(err)s" ceilometer-6.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po0000664000567000056710000001023312701406224027364 0ustar jenkinsjenkins00000000000000# SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 6.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-20 20:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 03:56+0000\n" "Last-Translator: SeYeon Lee \n" "Language-Team: Korean (South Korea)\n" "Language: ko-KR\n" "X-Generator: Zanata 3.7.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%d events are removed from database" msgstr "데이터베이스ě—서 %d 이벤트가 ě śę±°ë¨" #, python-format msgid "%d samples removed from database" msgstr "데이터베이스ě—서 %d ě플이 ě śę±°ë¨" msgid "Configuration:" msgstr "구성:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "%(nodelist)sě—서 %(db)sě— ě—°ę˛° 중 " msgid "Coordination backend started successfully." 
msgstr "조정 백엔드가 성공ě ěśĽëˇś 시작ëě—습ë‹ë‹¤." #, python-format msgid "Definitions: %s" msgstr "ě •ěť: %s" msgid "Detected change in pipeline configuration." msgstr "파이프라인 ęµ¬ě„±ěť ëł€ę˛˝ěť„ 발견í–습ë‹ë‹¤." #, python-format msgid "Dropping event data with TTL %d" msgstr "TTLěť´ %d인 이벤트 데이터 ě‚­ě ś" #, python-format msgid "Dropping metering data with TTL %d" msgstr "TTLěť´ %d인 측정 데이터 ě‚­ě ś" #, python-format msgid "Duplicate event detected, skipping it: %s" msgstr "중복 이벤트가 발견ëě–´ 해당 이벤트를 ę±´ë„뜀: %s" msgid "Expired residual resource and meter definition data" msgstr "잔여 ěžě› ë°Ź 측정 ě •ěť ëŤ°ěť´í„° 만료ë¨" #, python-format msgid "Index %s will be recreate." msgstr "%s 인덱스가 다시 ěťě„±ë©ë‹ë‹¤." #, python-format msgid "Joined partitioning group %s" msgstr "ę˛°í•©ëś íŚŚí‹°ě… ę·¸ëŁą %s" #, python-format msgid "Left partitioning group %s" msgstr "남은 íŚŚí‹°ě… ę·¸ëŁą %s" #, python-format msgid "No limit value provided, result set will be limited to %(limit)d." msgstr "한계 ę°’ěť´ ě śęłµëě§€ 않음, 결과 세트가 %(limit)d(으)로 제한ë©ë‹ë‹¤." msgid "Nothing to clean, database event time to live is disabled" msgstr "정리할 사항이 없음, 데이터베이스 이벤트 지속 시간(TTL)ěť´ 사용ëě§€ 않음" msgid "Nothing to clean, database metering time to live is disabled" msgstr "정리할 사항이 없음, 데이터베이스 측정 지속 시간(TTL)ěť´ 사용ëě§€ 않음" #, python-format msgid "" "Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " "%(param)s" msgstr "" "파이프라인 %(pipeline)s: %(param)s 매개변ě로 변í™ę¸° 인스턴스 %(name)s 설정 " #, python-format msgid "Pipeline config: %s" msgstr "파이프라인 구성: %s" msgid "Pipeline configuration file has been updated." msgstr "파이프라인 구성 파일이 업데이트ëě—습ë‹ë‹¤." #, python-format msgid "Polling pollster %(poll)s in the context of %(src)s" msgstr "%(src)s ě»¨í…ŤěŠ¤íŠ¸ěť ěťę˛¬ěˇ°ě‚¬ěž %(poll)s 폴ë§" #, python-format msgid "Publishing policy set to %s" msgstr "공개 ě •ě±…ěť´ %s(으)로 설정ë¨" msgid "Reconfiguring polling tasks." msgstr "íŹ´ë§ ěž‘ě—…ěť„ 재구성합ë‹ë‹¤." msgid "Reloading notification agent and listeners." msgstr "알림 ě—이전트와 리스ë„를 다시 로드합ë‹ë‹¤." #, python-format msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" msgstr "pollster %(name)s ę±´ë„뛰기, %(p_context)s ěžě›ě—서 ěť´ 주기를 발견함" #, python-format msgid "Starting server in PID %s" msgstr "PID %sěť ě„śë˛„ 시작" msgid "detected decoupled pipeline config format" msgstr "비결합 파이프라인 구성 í•식 발견" #, python-format msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" msgstr "%(resource_id)sěť ě¸ˇě • 데이터 %(counter_name)s: %(counter_volume)s" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "0.0.0.0:%(sport)sě—서 전달 중, http://127.0.0.1:%(vport)sě—서 보기" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "http://%(host)s:%(port)sě—서 전달 중" ceilometer-6.0.0/ceilometer/pipeline.py0000664000567000056710000007361012701406224021322 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # Copyright 2014 Red Hat, Inc # # Authors: Yunhong Jiang # Eoghan Glynn # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import hashlib from itertools import chain import os from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import timeutils import six from stevedore import extension import yaml from ceilometer.event.storage import models from ceilometer.i18n import _, _LI, _LW from ceilometer import publisher from ceilometer.publisher import utils as publisher_utils from ceilometer import sample as sample_util from ceilometer import utils OPTS = [ cfg.StrOpt('pipeline_cfg_file', default="pipeline.yaml", help="Configuration file for pipeline definition." ), cfg.StrOpt('event_pipeline_cfg_file', default="event_pipeline.yaml", help="Configuration file for event pipeline definition." ), cfg.BoolOpt('refresh_pipeline_cfg', default=False, help="Refresh Pipeline configuration on-the-fly." ), cfg.BoolOpt('refresh_event_pipeline_cfg', default=False, help="Refresh Event Pipeline configuration on-the-fly." ), cfg.IntOpt('pipeline_polling_interval', default=20, help="Polling interval for pipeline file configuration" " in seconds." ), ] cfg.CONF.register_opts(OPTS) LOG = log.getLogger(__name__) class PipelineException(Exception): def __init__(self, message, pipeline_cfg): self.msg = message self.pipeline_cfg = pipeline_cfg def __str__(self): return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg) @six.add_metaclass(abc.ABCMeta) class PipelineEndpoint(object): def __init__(self, context, pipeline): self.publish_context = PublishContext(context, [pipeline]) @abc.abstractmethod def sample(self, messages): pass class SamplePipelineEndpoint(PipelineEndpoint): def sample(self, messages): samples = chain.from_iterable(m["payload"] for m in messages) samples = [ sample_util.Sample(name=s['counter_name'], type=s['counter_type'], unit=s['counter_unit'], volume=s['counter_volume'], user_id=s['user_id'], project_id=s['project_id'], resource_id=s['resource_id'], timestamp=s['timestamp'], resource_metadata=s['resource_metadata'], source=s.get('source')) for s in samples if publisher_utils.verify_signature( s, cfg.CONF.publisher.telemetry_secret) ] with self.publish_context as p: p(samples) class EventPipelineEndpoint(PipelineEndpoint): def sample(self, messages): events = chain.from_iterable(m["payload"] for m in messages) events = [ models.Event( message_id=ev['message_id'], event_type=ev['event_type'], generated=timeutils.normalize_time( timeutils.parse_isotime(ev['generated'])), traits=[models.Trait(name, dtype, models.Trait.convert_value(dtype, value)) for name, dtype, value in ev['traits']], raw=ev.get('raw', {})) for ev in events if publisher_utils.verify_signature( ev, cfg.CONF.publisher.telemetry_secret) ] try: with self.publish_context as p: p(events) except Exception: if not cfg.CONF.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE raise return oslo_messaging.NotificationResult.HANDLED class _PipelineTransportManager(object): def __init__(self): self.transporters = [] @staticmethod def hash_grouping(datapoint, grouping_keys): value = '' for key in grouping_keys or []: value += datapoint.get(key) if datapoint.get(key) else '' return hash(value) def add_transporter(self, transporter): self.transporters.append(transporter) def publisher(self, context): serializer = self.serializer hash_grouping = self.hash_grouping transporters = self.transporters filter_attr = self.filter_attr event_type = self.event_type class PipelinePublishContext(object): def __enter__(self): def p(data): # TODO(gordc): cleanup so payload is always single # datapoint. 
we can't correctly bucketise # datapoints if batched. data = [data] if not isinstance(data, list) else data for datapoint in data: serialized_data = serializer(datapoint) for d_filter, grouping_keys, notifiers in transporters: if d_filter(serialized_data[filter_attr]): key = (hash_grouping(serialized_data, grouping_keys) % len(notifiers)) notifier = notifiers[key] notifier.sample(context.to_dict(), event_type=event_type, payload=[serialized_data]) return p def __exit__(self, exc_type, exc_value, traceback): pass return PipelinePublishContext() class SamplePipelineTransportManager(_PipelineTransportManager): filter_attr = 'counter_name' event_type = 'ceilometer.pipeline' @staticmethod def serializer(data): return publisher_utils.meter_message_from_counter( data, cfg.CONF.publisher.telemetry_secret) class EventPipelineTransportManager(_PipelineTransportManager): filter_attr = 'event_type' event_type = 'pipeline.event' @staticmethod def serializer(data): return publisher_utils.message_from_event( data, cfg.CONF.publisher.telemetry_secret) class PublishContext(object): def __init__(self, context, pipelines=None): pipelines = pipelines or [] self.pipelines = set(pipelines) self.context = context def add_pipelines(self, pipelines): self.pipelines.update(pipelines) def __enter__(self): def p(data): for p in self.pipelines: p.publish_data(self.context, data) return p def __exit__(self, exc_type, exc_value, traceback): for p in self.pipelines: p.flush(self.context) class Source(object): """Represents a source of samples or events.""" def __init__(self, cfg): self.cfg = cfg try: self.name = cfg['name'] self.sinks = cfg.get('sinks') except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) def __str__(self): return self.name def check_sinks(self, sinks): if not self.sinks: raise PipelineException( "No sink defined in source %s" % self, self.cfg) for sink in self.sinks: if sink not in sinks: raise PipelineException( "Dangling sink %s from source %s" % (sink, self), self.cfg) def check_source_filtering(self, data, d_type): """Source data rules checking - At least one meaningful datapoint exists - Included type and excluded type can't co-exist on the same pipeline - Included type meter and wildcard can't co-exist in the same pipeline """ if not data: raise PipelineException('No %s specified' % d_type, self.cfg) if ([x for x in data if x[0] not in '!*'] and [x for x in data if x[0] == '!']): raise PipelineException( 'Both included and excluded %s specified' % d_type, self.cfg) if '*' in data and [x for x in data if x[0] not in '!*']: raise PipelineException( 'Included %s specified with wildcard' % d_type, self.cfg) @staticmethod def is_supported(dataset, data_name): # Support wildcard like storage.* and !disk.* # Start with negation, we consider that the order is deny, allow if any(utils.match(data_name, datapoint[1:]) for datapoint in dataset if datapoint[0] == '!'): return False if any(utils.match(data_name, datapoint) for datapoint in dataset if datapoint[0] != '!'): return True # if we only have negation, we suppose the default is allow return all(datapoint.startswith('!') for datapoint in dataset) class EventSource(Source): """Represents a source of events. In effect it is a set of notification handlers capturing events for a set of matching notifications.
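An illustrative source entry of the kind matched here (names are
placeholders, mirroring the stock event_pipeline.yaml)::

    - name: event_source
      events:
          - "*"
      sinks:
          - event_sink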
""" def __init__(self, cfg): super(EventSource, self).__init__(cfg) self.events = cfg.get('events') self.check_source_filtering(self.events, 'events') def support_event(self, event_name): return self.is_supported(self.events, event_name) class SampleSource(Source): """Represents a source of samples. In effect it is a set of pollsters and/or notification handlers emitting samples for a set of matching meters. Each source encapsulates meter name matching, polling interval determination, optional resource enumeration or discovery, and mapping to one or more sinks for publication. """ def __init__(self, cfg): super(SampleSource, self).__init__(cfg) # Support 'counters' for backward compatibility self.meters = cfg.get('meters', cfg.get('counters')) try: self.interval = int(cfg.get('interval', 600)) except ValueError: raise PipelineException("Invalid interval value", cfg) if self.interval <= 0: raise PipelineException("Interval value should > 0", cfg) self.resources = cfg.get('resources') or [] if not isinstance(self.resources, list): raise PipelineException("Resources should be a list", cfg) self.discovery = cfg.get('discovery') or [] if not isinstance(self.discovery, list): raise PipelineException("Discovery should be a list", cfg) self.check_source_filtering(self.meters, 'meters') def get_interval(self): return self.interval def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) class Sink(object): """Represents a sink for the transformation and publication of data. Each sink config is concerned *only* with the transformation rules and publication conduits for data. In effect, a sink describes a chain of handlers. The chain starts with zero or more transformers and ends with one or more publishers. The first transformer in the chain is passed data from the corresponding source, takes some action such as deriving rate of change, performing unit conversion, or aggregating, before passing the modified data to next step. The subsequent transformers, if any, handle the data similarly. At the end of the chain, publishers publish the data. The exact publishing method depends on publisher type, for example, pushing into data storage via the message bus providing guaranteed delivery, or for loss-tolerant data UDP may be used. If no transformers are included in the chain, the publishers are passed data directly from the sink which are published unchanged. 
""" def __init__(self, cfg, transformer_manager): self.cfg = cfg try: self.name = cfg['name'] # It's legal to have no transformer specified self.transformer_cfg = cfg.get('transformers') or [] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) if not cfg.get('publishers'): raise PipelineException("No publisher specified", cfg) self.publishers = [] for p in cfg['publishers']: if '://' not in p: # Support old format without URL p = p + "://" try: self.publishers.append(publisher.get_publisher(p, self.NAMESPACE)) except Exception: LOG.exception(_("Unable to load publisher %s"), p) self.multi_publish = True if len(self.publishers) > 1 else False self.transformers = self._setup_transformers(cfg, transformer_manager) def __str__(self): return self.name def _setup_transformers(self, cfg, transformer_manager): transformers = [] for transformer in self.transformer_cfg: parameter = transformer['parameters'] or {} try: ext = transformer_manager[transformer['name']] except KeyError: raise PipelineException( "No transformer named %s loaded" % transformer['name'], cfg) transformers.append(ext.plugin(**parameter)) LOG.info(_LI( "Pipeline %(pipeline)s: Setup transformer instance %(name)s " "with parameter %(param)s") % ({'pipeline': self, 'name': transformer['name'], 'param': parameter})) return transformers class EventSink(Sink): NAMESPACE = 'ceilometer.event.publisher' def publish_events(self, ctxt, events): if events: for p in self.publishers: try: p.publish_events(ctxt, events) except Exception: LOG.exception(_("Pipeline %(pipeline)s: %(status)s" " after error from publisher %(pub)s") % ({'pipeline': self, 'status': 'Continue' if self.multi_publish else 'Exit', 'pub': p} )) if not self.multi_publish: raise def flush(self, ctxt): """Flush data after all events have been injected to pipeline.""" pass class SampleSink(Sink): NAMESPACE = 'ceilometer.publisher' def _transform_sample(self, start, ctxt, sample): try: for transformer in self.transformers[start:]: sample = transformer.handle_sample(ctxt, sample) if not sample: LOG.debug( "Pipeline %(pipeline)s: Sample dropped by " "transformer %(trans)s", {'pipeline': self, 'trans': transformer}) return return sample except Exception as err: # TODO(gordc): only use one log level. LOG.warning(_("Pipeline %(pipeline)s: " "Exit after error from transformer " "%(trans)s for %(smp)s") % ({'pipeline': self, 'trans': transformer, 'smp': sample})) LOG.exception(err) def _publish_samples(self, start, ctxt, samples): """Push samples into pipeline for publishing. :param start: The first transformer that the sample will be injected. This is mainly for flush() invocation that transformer may emit samples. :param ctxt: Execution context from the manager or service. :param samples: Sample list. 
""" transformed_samples = [] if not self.transformers: transformed_samples = samples else: for sample in samples: LOG.debug( "Pipeline %(pipeline)s: Transform sample " "%(smp)s from %(trans)s transformer", {'pipeline': self, 'smp': sample, 'trans': start}) sample = self._transform_sample(start, ctxt, sample) if sample: transformed_samples.append(sample) if transformed_samples: for p in self.publishers: try: p.publish_samples(ctxt, transformed_samples) except Exception: LOG.exception(_( "Pipeline %(pipeline)s: Continue after error " "from publisher %(pub)s") % ({'pipeline': self, 'pub': p})) def publish_samples(self, ctxt, samples): self._publish_samples(0, ctxt, samples) def flush(self, ctxt): """Flush data after all samples have been injected to pipeline.""" for (i, transformer) in enumerate(self.transformers): try: self._publish_samples(i + 1, ctxt, list(transformer.flush(ctxt))) except Exception as err: LOG.warning(_( "Pipeline %(pipeline)s: Error flushing " "transformer %(trans)s") % ({'pipeline': self, 'trans': transformer})) LOG.exception(err) @six.add_metaclass(abc.ABCMeta) class Pipeline(object): """Represents a coupling between a sink and a corresponding source.""" def __init__(self, source, sink): self.source = source self.sink = sink self.name = str(self) def __str__(self): return (self.source.name if self.source.name == self.sink.name else '%s:%s' % (self.source.name, self.sink.name)) def flush(self, ctxt): self.sink.flush(ctxt) @property def publishers(self): return self.sink.publishers @abc.abstractmethod def publish_data(self, ctxt, data): """Publish data from pipeline.""" class EventPipeline(Pipeline): """Represents a pipeline for Events.""" def __str__(self): # NOTE(gordc): prepend a namespace so we ensure event and sample # pipelines do not have the same name. 
return 'event:%s' % super(EventPipeline, self).__str__() def support_event(self, event_type): return self.source.support_event(event_type) def publish_data(self, ctxt, events): if not isinstance(events, list): events = [events] supported = [e for e in events if self.source.support_event(e.event_type)] self.sink.publish_events(ctxt, supported) class SamplePipeline(Pipeline): """Represents a pipeline for Samples.""" def get_interval(self): return self.source.interval @property def resources(self): return self.source.resources @property def discovery(self): return self.source.discovery def support_meter(self, meter_name): return self.source.support_meter(meter_name) def _validate_volume(self, s): volume = s.volume if volume is None: LOG.warning(_LW( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: None), the sample will' ' be dropped') % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} ) return False if not isinstance(volume, (int, float)): try: volume = float(volume) except ValueError: LOG.warning(_LW( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped') % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': ( s.timestamp if s.timestamp else 'NO TIMESTAMP'), 'counter_volume': volume} ) return False return True def publish_data(self, ctxt, samples): if not isinstance(samples, list): samples = [samples] supported = [s for s in samples if self.source.support_meter(s.name) and self._validate_volume(s)] self.sink.publish_samples(ctxt, supported) SAMPLE_TYPE = {'pipeline': SamplePipeline, 'source': SampleSource, 'sink': SampleSink} EVENT_TYPE = {'pipeline': EventPipeline, 'source': EventSource, 'sink': EventSink} class PipelineManager(object): """Pipeline Manager Pipeline manager sets up pipelines according to config file Usually only one pipeline manager exists in the system. """ def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE): """Setup the pipelines according to config. The configuration is supported as follows: Decoupled: the source and sink configuration are separately specified before being linked together. This allows source- specific configuration, such as resource discovery, to be kept focused only on the fine-grained source while avoiding the necessity for wide duplication of sink-related config. The configuration is provided in the form of separate lists of dictionaries defining sources and sinks, for example: {"sources": [{"name": source_1, "interval": interval_time, "meters" : ["meter_1", "meter_2"], "resources": ["resource_uri1", "resource_uri2"], "sinks" : ["sink_1", "sink_2"] }, {"name": source_2, "interval": interval_time, "meters" : ["meter_3"], "sinks" : ["sink_2"] }, ], "sinks": [{"name": sink_1, "transformers": [ {"name": "Transformer_1", "parameters": {"p1": "value"}}, {"name": "Transformer_2", "parameters": {"p1": "value"}}, ], "publishers": ["publisher_1", "publisher_2"] }, {"name": sink_2, "publishers": ["publisher_3"] }, ] } The interval determines the cadence of sample injection into the pipeline where samples are produced under the direct control of an agent, i.e. via a polling cycle as opposed to incoming notifications. Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. 
The 'meter_name" is Sample name field. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. The resources is list of URI indicating the resources from where the meters should be polled. It's optional and it's up to the specific pollster to decide how to use it. Transformer's name is plugin name in setup.cfg. Publisher's name is plugin name in setup.cfg """ self.pipelines = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) LOG.info(_LI('detected decoupled pipeline config format')) unique_names = set() sources = [] for s in cfg.get('sources', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) sources.append(p_type['source'](s)) unique_names.clear() sinks = {} for s in cfg.get('sinks', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated sink names: %s" % name, self) else: unique_names.add(name) sinks[s['name']] = p_type['sink'](s, transformer_manager) unique_names.clear() for source in sources: source.check_sinks(sinks) for target in source.sinks: pipe = p_type['pipeline'](source, sinks[target]) if pipe.name in unique_names: raise PipelineException( "Duplicate pipeline name: %s. Ensure pipeline" " names are unique. (name is the source and sink" " names combined)" % pipe.name, cfg) else: unique_names.add(pipe.name) self.pipelines.append(pipe) unique_names.clear() def publisher(self, context): """Build a new Publisher for these manager pipelines. :param context: The context. """ return PublishContext(context, self.pipelines) class PollingManager(object): """Polling Manager Polling manager sets up polling according to config file. """ def __init__(self, cfg): """Setup the polling according to config. The configuration is the sources half of the Pipeline Config. 
""" self.sources = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) LOG.info(_LI('detected decoupled pipeline config format')) unique_names = set() for s in cfg.get('sources', []): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) self.sources.append(SampleSource(s)) unique_names.clear() def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE): if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) LOG.debug("Pipeline config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) return PipelineManager(pipeline_cfg, transformer_manager or extension.ExtensionManager( 'ceilometer.transformer', ), p_type) def _setup_polling_manager(cfg_file): if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) LOG.debug("Polling config file: %s", cfg_file) with open(cfg_file) as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) return PollingManager(pipeline_cfg) def setup_event_pipeline(transformer_manager=None): """Setup event pipeline manager according to yaml config file.""" cfg_file = cfg.CONF.event_pipeline_cfg_file return _setup_pipeline_manager(cfg_file, transformer_manager, EVENT_TYPE) def setup_pipeline(transformer_manager=None): """Setup pipeline manager according to yaml config file.""" cfg_file = cfg.CONF.pipeline_cfg_file return _setup_pipeline_manager(cfg_file, transformer_manager) def _get_pipeline_cfg_file(p_type=SAMPLE_TYPE): if p_type == EVENT_TYPE: cfg_file = cfg.CONF.event_pipeline_cfg_file else: cfg_file = cfg.CONF.pipeline_cfg_file if not os.path.exists(cfg_file): cfg_file = cfg.CONF.find_file(cfg_file) return cfg_file def get_pipeline_mtime(p_type=SAMPLE_TYPE): cfg_file = _get_pipeline_cfg_file(p_type) return os.path.getmtime(cfg_file) def get_pipeline_hash(p_type=SAMPLE_TYPE): cfg_file = _get_pipeline_cfg_file(p_type) with open(cfg_file) as fap: data = fap.read() if six.PY3: data = data.encode('utf-8') file_hash = hashlib.md5(data).hexdigest() return file_hash def setup_polling(): """Setup polling manager according to yaml config file.""" cfg_file = cfg.CONF.pipeline_cfg_file return _setup_polling_manager(cfg_file) def get_pipeline_grouping_key(pipe): keys = [] for transformer in pipe.sink.transformers: keys += transformer.grouping_keys return list(set(keys)) ceilometer-6.0.0/ceilometer/publisher/0000775000567000056710000000000012701406364021136 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/publisher/test.py0000664000567000056710000000271012701406224022462 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Publish a sample in memory, useful for testing """ from ceilometer import publisher class TestPublisher(publisher.PublisherBase): """Publisher used in unit testing.""" def __init__(self, parsed_url): self.samples = [] self.events = [] self.calls = 0 def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ self.samples.extend(samples) self.calls += 1 def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ self.events.extend(events) self.calls += 1 ceilometer-6.0.0/ceilometer/publisher/direct.py0000664000567000056710000000410212701406224022752 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import timeutils from ceilometer.dispatcher import database from ceilometer import publisher from ceilometer.publisher import utils class DirectPublisher(publisher.PublisherBase): """A publisher that allows saving directly from the pipeline. Samples are saved to the currently configured database by hitching a ride on the DatabaseDispatcher. This is useful where it is desirable to limit the number of external services that are required. """ def __init__(self, parsed_url): super(DirectPublisher, self).__init__(parsed_url) dispatcher = database.DatabaseDispatcher(cfg.CONF) self.meter_conn = dispatcher.meter_conn self.event_conn = dispatcher.event_conn def publish_samples(self, context, samples): if not isinstance(samples, list): samples = [samples] # Transform the Sample objects into a list of dicts meters = [ utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) for sample in samples ] for meter in meters: if meter.get('timestamp'): ts = timeutils.parse_isotime(meter['timestamp']) meter['timestamp'] = timeutils.normalize_time(ts) self.meter_conn.record_metering_data(meter) def publish_events(self, context, events): if not isinstance(events, list): events = [events] self.event_conn.record_events(events) ceilometer-6.0.0/ceilometer/publisher/utils.py0000664000567000056710000001133712701406223022647 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utils for publishers """ import hashlib import hmac from oslo_config import cfg import six from ceilometer import utils OPTS = [ cfg.StrOpt('telemetry_secret', secret=True, default='change this for valid signing', help='Secret value for signing messages. Set value empty if ' 'signing is not required to avoid computational overhead.', deprecated_opts=[cfg.DeprecatedOpt("metering_secret", "DEFAULT"), cfg.DeprecatedOpt("metering_secret", "publisher_rpc"), cfg.DeprecatedOpt("metering_secret", "publisher")] ), ] cfg.CONF.register_opts(OPTS, group="publisher") def compute_signature(message, secret): """Return the signature for a message dictionary.""" if not secret: return '' if isinstance(secret, six.text_type): secret = secret.encode('utf-8') digest_maker = hmac.new(secret, b'', hashlib.sha256) for name, value in utils.recursive_keypairs(message): if name == 'message_signature': # Skip any existing signature value, which would not have # been part of the original message. continue digest_maker.update(six.text_type(name).encode('utf-8')) digest_maker.update(six.text_type(value).encode('utf-8')) return digest_maker.hexdigest() def besteffort_compare_digest(first, second): """Returns True if both string inputs are equal, otherwise False. This function should take a constant amount of time regardless of how many characters in the strings match. """ # NOTE(sileht): compare_digest method protected for timing-attacks # exists since python >= 2.7.7 and python >= 3.3 # this a bit less-secure python fallback version # taken from https://github.com/openstack/python-keystoneclient/blob/ # master/keystoneclient/middleware/memcache_crypt.py#L88 if len(first) != len(second): return False result = 0 if six.PY3 and isinstance(first, bytes) and isinstance(second, bytes): for x, y in zip(first, second): result |= x ^ y else: for x, y in zip(first, second): result |= ord(x) ^ ord(y) return result == 0 if hasattr(hmac, 'compare_digest'): compare_digest = hmac.compare_digest else: compare_digest = besteffort_compare_digest def verify_signature(message, secret): """Check the signature in the message. Message is verified against the value computed from the rest of the contents. """ if not secret: return True old_sig = message.get('message_signature', '') new_sig = compute_signature(message, secret) if isinstance(old_sig, six.text_type): try: old_sig = old_sig.encode('ascii') except UnicodeDecodeError: return False if six.PY3: new_sig = new_sig.encode('ascii') return compare_digest(new_sig, old_sig) def meter_message_from_counter(sample, secret): """Make a metering message ready to be published or stored. Returns a dictionary containing a metering message for a notification message and a Sample instance. """ msg = {'source': sample.source, 'counter_name': sample.name, 'counter_type': sample.type, 'counter_unit': sample.unit, 'counter_volume': sample.volume, 'user_id': sample.user_id, 'project_id': sample.project_id, 'resource_id': sample.resource_id, 'timestamp': sample.timestamp, 'resource_metadata': sample.resource_metadata, 'message_id': sample.id, } msg['message_signature'] = compute_signature(msg, secret) return msg def message_from_event(event, secret): """Make an event message ready to be published or stored. 
Returns a serialized model of Event containing an event message """ msg = event.serialize() msg['message_signature'] = compute_signature(msg, secret) return msg ceilometer-6.0.0/ceilometer/publisher/__init__.py0000664000567000056710000000270112701406224023242 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_utils import netutils import six from stevedore import driver def get_publisher(url, namespace='ceilometer.publisher'): """Get publisher driver and load it. :param URL: URL for the publisher :param namespace: Namespace to use to look for drivers. """ parse_result = netutils.urlsplit(url) loaded_driver = driver.DriverManager(namespace, parse_result.scheme) return loaded_driver.driver(parse_result) @six.add_metaclass(abc.ABCMeta) class PublisherBase(object): """Base class for plugins that publish data.""" def __init__(self, parsed_url): pass @abc.abstractmethod def publish_samples(self, context, samples): """Publish samples into final conduit.""" @abc.abstractmethod def publish_events(self, context, events): """Publish events into final conduit.""" ceilometer-6.0.0/ceilometer/publisher/file.py0000664000567000056710000000744512701406224022434 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers from oslo_log import log from six.moves.urllib import parse as urlparse import ceilometer from ceilometer.i18n import _ from ceilometer import publisher LOG = log.getLogger(__name__) class FilePublisher(publisher.PublisherBase): """Publisher metering data to file. The publisher which records metering data into a file. The file name and location should be configured in ceilometer pipeline configuration file. If a file name and location is not specified, this File Publisher will not log any meters other than log a warning in Ceilometer log file. To enable this publisher, add the following section to the /etc/ceilometer/publisher.yaml file or simply add it to an existing pipeline:: - name: meter_file interval: 600 counters: - "*" transformers: publishers: - file:///var/test?max_bytes=10000000&backup_count=5 File path is required for this publisher to work properly. If max_bytes or backup_count is missing, FileHandler will be used to save the metering data. If max_bytes and backup_count are present, RotatingFileHandler will be used to save the metering data. 
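With the URL above (an illustrative path), standard RotatingFileHandler
semantics apply: samples are appended to /var/test and rolled over to
/var/test.1 through /var/test.5 once max_bytes is reached.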
""" def __init__(self, parsed_url): super(FilePublisher, self).__init__(parsed_url) self.publisher_logger = None path = parsed_url.path if not path or path.lower() == 'file': LOG.error(_('The path for the file publisher is required')) return rfh = None max_bytes = 0 backup_count = 0 # Handling other configuration options in the query string if parsed_url.query: params = urlparse.parse_qs(parsed_url.query) if params.get('max_bytes') and params.get('backup_count'): try: max_bytes = int(params.get('max_bytes')[0]) backup_count = int(params.get('backup_count')[0]) except ValueError: LOG.error(_('max_bytes and backup_count should be ' 'numbers.')) return # create rotating file handler rfh = logging.handlers.RotatingFileHandler( path, encoding='utf8', maxBytes=max_bytes, backupCount=backup_count) self.publisher_logger = logging.Logger('publisher.file') self.publisher_logger.propagate = False self.publisher_logger.setLevel(logging.INFO) rfh.setLevel(logging.INFO) self.publisher_logger.addHandler(rfh) def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ if self.publisher_logger: for sample in samples: self.publisher_logger.info(sample.as_dict()) def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ceilometer-6.0.0/ceilometer/publisher/udp.py0000664000567000056710000000514212701406224022275 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Publish a sample using an UDP mechanism """ import socket import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import ceilometer from ceilometer.i18n import _ from ceilometer import publisher from ceilometer.publisher import utils cfg.CONF.import_opt('udp_port', 'ceilometer.collector', group='collector') LOG = log.getLogger(__name__) class UDPPublisher(publisher.PublisherBase): def __init__(self, parsed_url): self.host, self.port = netutils.parse_host_port( parsed_url.netloc, default_port=cfg.CONF.collector.udp_port) if netutils.is_valid_ipv6(self.host): addr_family = socket.AF_INET6 else: addr_family = socket.AF_INET self.socket = socket.socket(addr_family, socket.SOCK_DGRAM) def publish_samples(self, context, samples): """Send a metering message for publishing :param context: Execution context from the service or RPC call :param samples: Samples from pipeline after transformation """ for sample in samples: msg = utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) host = self.host port = self.port LOG.debug("Publishing sample %(msg)s over UDP to " "%(host)s:%(port)d", {'msg': msg, 'host': host, 'port': port}) try: self.socket.sendto(msgpack.dumps(msg), (self.host, self.port)) except Exception as e: LOG.warning(_("Unable to send sample over UDP")) LOG.exception(e) def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ceilometer-6.0.0/ceilometer/publisher/kafka_broker.py0000664000567000056710000000731512701406224024132 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Cisco Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import kafka from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import netutils from six.moves.urllib import parse as urlparse from ceilometer.i18n import _LE from ceilometer.publisher import messaging LOG = log.getLogger(__name__) class KafkaBrokerPublisher(messaging.MessagingPublisher): """Publish metering data to kafka broker. The ip address and port number of kafka broker should be configured in ceilometer pipeline configuration file. If an ip address is not specified, this kafka publisher will not publish any meters. To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: meter: - name: meter_kafka interval: 600 counters: - "*" transformers: sinks: - kafka_sink sinks: - name: kafka_sink transformers: publishers: - kafka://[kafka_broker_ip]:[kafka_broker_port]?topic=[topic] Kafka topic name and broker's port are required for this publisher to work properly. If topic parameter is missing, this kafka publisher publish metering data under a topic name, 'ceilometer'. If the port number is not specified, this Kafka Publisher will use 9092 as the broker's port. 
This publisher has transmit options such as queue, drop, and retry. These options are specified using the policy field of the URL parameter. When the queue option is selected, the local queue length can also be bounded using the max_queue_length field. When a transfer fails with the retry option, the publisher tries to resend the data as many times as specified in the max_retry field. If max_retry is not specified, the default number of retries is 100. """ def __init__(self, parsed_url): super(KafkaBrokerPublisher, self).__init__(parsed_url) options = urlparse.parse_qs(parsed_url.query) self._producer = None self._host, self._port = netutils.parse_host_port( parsed_url.netloc, default_port=9092) self._topic = options.get('topic', ['ceilometer'])[-1] self.max_retry = int(options.get('max_retry', [100])[-1]) def _ensure_connection(self): if self._producer: return try: client = kafka.KafkaClient("%s:%s" % (self._host, self._port)) self._producer = kafka.SimpleProducer(client) except Exception as e: LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) raise messaging.DeliveryFailure('Kafka Client is not available, ' 'please restart Kafka client') def _send(self, context, event_type, data): self._ensure_connection() # TODO(sileht): don't split the payload into multiple network # messages ... but how to do that without breaking consuming # application... try: for d in data: self._producer.send_messages(self._topic, jsonutils.dumps(d)) except Exception as e: messaging.raise_delivery_failure(e) ceilometer-6.0.0/ceilometer/publisher/messaging.py0000664000567000056710000001771312701406224023471 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using the preferred RPC mechanism.
""" import abc import itertools import operator from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import encodeutils from oslo_utils import excutils import six import six.moves.urllib.parse as urlparse from ceilometer.i18n import _, _LE, _LI from ceilometer import messaging from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) NOTIFIER_OPTS = [ cfg.StrOpt('metering_topic', default='metering', help='The topic that ceilometer uses for metering ' 'notifications.', ), cfg.StrOpt('event_topic', default='event', help='The topic that ceilometer uses for event ' 'notifications.', ), cfg.StrOpt('telemetry_driver', default='messagingv2', help='The driver that ceilometer uses for metering ' 'notifications.', deprecated_name='metering_driver', ) ] cfg.CONF.register_opts(NOTIFIER_OPTS, group="publisher_notifier") cfg.CONF.import_opt('host', 'ceilometer.service') class DeliveryFailure(Exception): def __init__(self, message=None, cause=None): super(DeliveryFailure, self).__init__(message) self.cause = cause def raise_delivery_failure(exc): excutils.raise_with_cause(DeliveryFailure, encodeutils.exception_to_unicode(exc), cause=exc) @six.add_metaclass(abc.ABCMeta) class MessagingPublisher(publisher.PublisherBase): def __init__(self, parsed_url): options = urlparse.parse_qs(parsed_url.query) # the value of options is a list of url param values # only take care of the latest one if the option # is provided more than once self.per_meter_topic = bool(int( options.get('per_meter_topic', [0])[-1])) self.policy = options.get('policy', ['default'])[-1] self.max_queue_length = int(options.get( 'max_queue_length', [1024])[-1]) self.max_retry = 0 self.local_queue = [] if self.policy in ['default', 'queue', 'drop']: LOG.info(_LI('Publishing policy set to %s') % self.policy) else: LOG.warning(_('Publishing policy is unknown (%s) force to ' 'default') % self.policy) self.policy = 'default' self.retry = 1 if self.policy in ['queue', 'drop'] else None def publish_samples(self, context, samples): """Publish samples on RPC. :param context: Execution context from the service or RPC call. :param samples: Samples from pipeline after transformation. """ meters = [ utils.meter_message_from_counter( sample, cfg.CONF.publisher.telemetry_secret) for sample in samples ] topic = cfg.CONF.publisher_notifier.metering_topic self.local_queue.append((context, topic, meters)) if self.per_meter_topic: for meter_name, meter_list in itertools.groupby( sorted(meters, key=operator.itemgetter('counter_name')), operator.itemgetter('counter_name')): meter_list = list(meter_list) topic_name = topic + '.' 
+ meter_name LOG.debug('Publishing %(m)d samples on %(n)s', {'m': len(meter_list), 'n': topic_name}) self.local_queue.append((context, topic_name, meter_list)) self.flush() def flush(self): # NOTE(sileht): # this is why the self.local_queue is emptied before processing the # queue and the remaining messages in the queue are added to # self.local_queue after in case of another call having already added # something in the self.local_queue queue = self.local_queue self.local_queue = [] self.local_queue = (self._process_queue(queue, self.policy) + self.local_queue) if self.policy == 'queue': self._check_queue_length() def _check_queue_length(self): queue_length = len(self.local_queue) if queue_length > self.max_queue_length > 0: count = queue_length - self.max_queue_length self.local_queue = self.local_queue[count:] LOG.warning(_("Publisher max local_queue length is exceeded, " "dropping %d oldest samples") % count) def _process_queue(self, queue, policy): current_retry = 0 while queue: context, topic, data = queue[0] try: self._send(context, topic, data) except DeliveryFailure: data = sum([len(m) for __, __, m in queue]) if policy == 'queue': LOG.warning(_("Failed to publish %d datapoints, queue " "them"), data) return queue elif policy == 'drop': LOG.warning(_("Failed to publish %d datapoints, " "dropping them"), data) return [] current_retry += 1 if current_retry >= self.max_retry: LOG.exception(_LE("Failed to retry to send sample data " "with max_retry times")) raise else: queue.pop(0) return [] def publish_events(self, context, events): """Send an event message for publishing :param context: Execution context from the service or RPC call :param events: events from pipeline after transformation """ ev_list = [utils.message_from_event( event, cfg.CONF.publisher.telemetry_secret) for event in events] topic = cfg.CONF.publisher_notifier.event_topic self.local_queue.append((context, topic, ev_list)) self.flush() @abc.abstractmethod def _send(self, context, topic, meters): """Send the meters to the messaging topic.""" class NotifierPublisher(MessagingPublisher): def __init__(self, parsed_url, default_topic): super(NotifierPublisher, self).__init__(parsed_url) options = urlparse.parse_qs(parsed_url.query) topic = options.get('topic', [default_topic])[-1] self.notifier = oslo_messaging.Notifier( messaging.get_transport(), driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id='telemetry.publisher.%s' % cfg.CONF.host, topic=topic, retry=self.retry ) def _send(self, context, event_type, data): try: self.notifier.sample(context.to_dict(), event_type=event_type, payload=data) except oslo_messaging.MessageDeliveryFailure as e: raise_delivery_failure(e) class SampleNotifierPublisher(NotifierPublisher): def __init__(self, parsed_url): super(SampleNotifierPublisher, self).__init__( parsed_url, cfg.CONF.publisher_notifier.metering_topic) class EventNotifierPublisher(NotifierPublisher): def __init__(self, parsed_url): super(EventNotifierPublisher, self).__init__( parsed_url, cfg.CONF.publisher_notifier.event_topic) ceilometer-6.0.0/ceilometer/coordination.py0000664000567000056710000001672112701406223022204 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg from oslo_log import log import retrying import tooz.coordination from ceilometer.i18n import _LE, _LI, _LW from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('backend_url', help='The backend URL to use for distributed coordination. If ' 'left empty, per-deployment central agent and per-host ' 'compute agent won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of that service is running.'), cfg.FloatOpt('heartbeat', default=1.0, help='Number of seconds between heartbeats for distributed ' 'coordination.'), cfg.FloatOpt('check_watchers', default=10.0, help='Number of seconds between checks to see if group ' 'membership has changed') ] cfg.CONF.register_opts(OPTS, group='coordination') class MemberNotInGroupError(Exception): def __init__(self, group_id, members, my_id): super(MemberNotInGroupError, self).__init__(_LE( 'Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: ' 'Current agent is not part of group and cannot take tasks') % {'group_id': group_id, 'members': members, 'me': my_id}) def retry_on_member_not_in_group(exception): return isinstance(exception, MemberNotInGroupError) class PartitionCoordinator(object): """Workload partitioning coordinator. This class uses the `tooz` library to manage group membership. To ensure that the other agents know this agent is still alive, the `heartbeat` method should be called periodically. Coordination errors and reconnects are handled under the hood, so the service using the partition coordinator need not care whether the coordination backend is down. The `extract_my_subset` will simply return an empty iterable in this case. 
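An illustrative lifecycle, with a hypothetical group name::

    coordinator = PartitionCoordinator()
    coordinator.start()
    coordinator.join_group('central-agent')
    mine = coordinator.extract_my_subset('central-agent', resources)
    coordinator.heartbeat()  # call periodically
    coordinator.stop()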
""" def __init__(self, my_id=None): self._coordinator = None self._groups = set() self._my_id = my_id or str(uuid.uuid4()) def start(self): backend_url = cfg.CONF.coordination.backend_url if backend_url: try: self._coordinator = tooz.coordination.get_coordinator( backend_url, self._my_id) self._coordinator.start() LOG.info(_LI('Coordination backend started successfully.')) except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) def stop(self): if not self._coordinator: return for group in list(self._groups): self.leave_group(group) try: self._coordinator.stop() except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) finally: self._coordinator = None def is_active(self): return self._coordinator is not None def heartbeat(self): if self._coordinator: if not self._coordinator.is_started: # re-connect self.start() try: self._coordinator.heartbeat() except tooz.coordination.ToozError: LOG.exception(_LE('Error sending a heartbeat to coordination ' 'backend.')) def watch_group(self, namespace, callback): if self._coordinator: self._coordinator.watch_join_group(namespace, callback) self._coordinator.watch_leave_group(namespace, callback) def run_watchers(self): if self._coordinator: self._coordinator.run_watchers() def join_group(self, group_id): if (not self._coordinator or not self._coordinator.is_started or not group_id): return while True: try: join_req = self._coordinator.join_group(group_id) join_req.get() LOG.info(_LI('Joined partitioning group %s'), group_id) break except tooz.coordination.MemberAlreadyExist: return except tooz.coordination.GroupNotCreated: create_grp_req = self._coordinator.create_group(group_id) try: create_grp_req.get() except tooz.coordination.GroupAlreadyExist: pass except tooz.coordination.ToozError: LOG.exception(_LE('Error joining partitioning group %s,' ' re-trying'), group_id) self._groups.add(group_id) def leave_group(self, group_id): if group_id not in self._groups: return if self._coordinator: self._coordinator.leave_group(group_id) self._groups.remove(group_id) LOG.info(_LI('Left partitioning group %s'), group_id) def _get_members(self, group_id): if not self._coordinator: return [self._my_id] while True: get_members_req = self._coordinator.get_members(group_id) try: return get_members_req.get() except tooz.coordination.GroupNotCreated: self.join_group(group_id) @retrying.retry(stop_max_attempt_number=5, wait_random_max=2000, retry_on_exception=retry_on_member_not_in_group) def extract_my_subset(self, group_id, iterable, attempt=0): """Filters an iterable, returning only objects assigned to this agent. We have a list of objects and get a list of active group members from `tooz`. We then hash all the objects into buckets and return only the ones that hashed into *our* bucket. """ if not group_id: return iterable if group_id not in self._groups: self.join_group(group_id) try: members = self._get_members(group_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) if self._my_id not in members: LOG.warning(_LW('Cannot extract tasks because agent failed to ' 'join group properly. 
Rejoining group.')) self.join_group(group_id) members = self._get_members(group_id) if self._my_id not in members: raise MemberNotInGroupError(group_id, members, self._my_id) hr = utils.HashRing(members) filtered = [v for v in iterable if hr.get_node(str(v)) == self._my_id] LOG.debug('My subset: %s', [str(f) for f in filtered]) return filtered except tooz.coordination.ToozError: LOG.exception(_LE('Error getting group membership info from ' 'coordination backend.')) return [] ceilometer-6.0.0/ceilometer/keystone_client.py0000664000567000056710000001476512701406223022721 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystoneauth1 import exceptions as ka_exception from keystoneauth1 import identity as ka_identity from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg from oslo_log import log LOG = log.getLogger(__name__) CFG_GROUP = "service_credentials" def get_session(requests_session=None): """Get a ceilometer service credentials auth session.""" auth_plugin = ka_loading.load_auth_from_conf_options(cfg.CONF, CFG_GROUP) session = ka_loading.load_session_from_conf_options( cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session ) return session def get_client(trust_id=None, requests_session=None): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = get_session(requests_session=requests_session) return ks_client_v3.Client(session=session, trust_id=trust_id) def get_service_catalog(client): return client.session.auth.get_access(client.session).service_catalog def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token def get_client_on_behalf_user(auth_plugin, trust_id=None, requests_session=None): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = ka_loading.load_session_from_conf_options( cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session ) return ks_client_v3.Client(session=session, trust_id=trust_id) def create_trust_id(trustor_user_id, trustor_project_id, roles, auth_plugin): """Create a new trust using the ceilometer service user.""" admin_client = get_client() trustee_user_id = admin_client.auth_ref.user_id client = get_client_on_behalf_user(auth_plugin=auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles) return trust.id def delete_trust_id(trust_id, auth_plugin): """Delete a trust previously setup for the ceilometer user.""" client = get_client_on_behalf_user(auth_plugin=auth_plugin) try: client.trusts.delete(trust_id) except ka_exception.NotFound: pass CLI_OPTS = [ cfg.StrOpt('region-name', deprecated_group="DEFAULT", deprecated_name="os-region-name", default=os.environ.get('OS_REGION_NAME'), help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', 
os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] cfg.CONF.register_cli_opts(CLI_OPTS, group=CFG_GROUP) def register_keystoneauth_opts(conf): ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) conf.set_default("auth_type", default="password-ceilometer-legacy", group=CFG_GROUP) def setup_keystoneauth(conf): if conf[CFG_GROUP].auth_type == "password-ceilometer-legacy": LOG.warning("Value 'password-ceilometer-legacy' for '[%s]/auth_type' " "is deprecated. And will be removed in Ceilometer 7.0. " "Use 'password' instead.", CFG_GROUP) ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) class LegacyCeilometerKeystoneLoader(ka_loading.BaseLoader): @property def plugin_class(self): return ka_identity.V2Password def get_options(self): options = super(LegacyCeilometerKeystoneLoader, self).get_options() options.extend([ ka_loading.Opt( 'os-username', default=os.environ.get('OS_USERNAME', 'ceilometer'), help='User name to use for OpenStack service access.'), ka_loading.Opt( 'os-password', secret=True, default=os.environ.get('OS_PASSWORD', 'admin'), help='Password to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-id', default=os.environ.get('OS_TENANT_ID', ''), help='Tenant ID to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-name', default=os.environ.get('OS_TENANT_NAME', 'admin'), help='Tenant name to use for OpenStack service access.'), ka_loading.Opt( 'os-auth-url', default=os.environ.get('OS_AUTH_URL', 'http://localhost:5000/v2.0'), help='Auth URL to use for OpenStack service access.'), ]) return options def load_from_options(self, **kwargs): options_map = { 'os_auth_url': 'auth_url', 'os_username': 'username', 'os_password': 'password', 'os_tenant_name': 'tenant_name', 'os_tenant_id': 'tenant_id', } identity_kwargs = dict((options_map[o.dest], kwargs.get(o.dest) or o.default) for o in self.get_options() if o.dest in options_map) return self.plugin_class(**identity_kwargs) ceilometer-6.0.0/ceilometer/compute/0000775000567000056710000000000012701406364020615 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/notifications/0000775000567000056710000000000012701406364023466 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/notifications/__init__.py0000664000567000056710000000241712701406223025575 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
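# Illustrative: with the default nova_control_exchange ("nova") and a single
# notification topic ("notifications"), get_targets() below yields the
# equivalent of:
#
#     [oslo_messaging.Target(topic='notifications', exchange='nova')]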
from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base OPTS = [ cfg.StrOpt('nova_control_exchange', default='nova', help="Exchange name for Nova notifications."), ] cfg.CONF.register_opts(OPTS) class ComputeNotificationBase(plugin_base.NotificationBase): def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=conf.nova_control_exchange) for topic in self.get_notification_topics(conf)] ceilometer-6.0.0/ceilometer/compute/notifications/instance.py0000664000567000056710000000632612701406223025645 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing compute sample messages from notification events. """ import abc import six from ceilometer.agent import plugin_base from ceilometer.compute import notifications from ceilometer.compute import util from ceilometer import sample @six.add_metaclass(abc.ABCMeta) class UserMetadataAwareInstanceNotificationBase( notifications.ComputeNotificationBase): """Consumes notifications containing instance user metadata.""" def process_notification(self, message): instance_properties = self.get_instance_properties(message) if isinstance(instance_properties.get('metadata'), dict): src_metadata = instance_properties['metadata'] del instance_properties['metadata'] util.add_reserved_user_metadata(src_metadata, instance_properties) return self.get_sample(message) def get_instance_properties(self, message): """Retrieve instance properties from notification payload.""" return message['payload'] @abc.abstractmethod def get_sample(self, message): """Derive sample from notification payload.""" class InstanceScheduled(UserMetadataAwareInstanceNotificationBase, plugin_base.NonMetricNotificationBase): event_types = ['scheduler.run_instance.scheduled'] def get_instance_properties(self, message): """Retrieve instance properties from notification payload.""" return message['payload']['request_spec']['instance_properties'] def get_sample(self, message): yield sample.Sample.from_notification( name='instance.scheduled', type=sample.TYPE_DELTA, volume=1, unit='instance', user_id=None, project_id=message['payload']['request_spec'] ['instance_properties']['project_id'], resource_id=message['payload']['instance_id'], message=message) class ComputeInstanceNotificationBase( UserMetadataAwareInstanceNotificationBase): """Convert compute.instance.* notifications into Samples.""" event_types = ['compute.instance.*'] class Instance(ComputeInstanceNotificationBase, plugin_base.NonMetricNotificationBase): def get_sample(self, message): yield sample.Sample.from_notification( name='instance', type=sample.TYPE_GAUGE, unit='instance', volume=1, user_id=message['payload']['user_id'], project_id=message['payload']['tenant_id'], resource_id=message['payload']['instance_id'], 
message=message) ceilometer-6.0.0/ceilometer/compute/pollsters/0000775000567000056710000000000012701406364022644 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/pollsters/disk.py0000664000567000056710000006422012701406223024146 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections from oslo_log import log import six import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) DiskIOData = collections.namedtuple( 'DiskIOData', 'r_bytes r_requests w_bytes w_requests per_disk_requests', ) DiskRateData = collections.namedtuple('DiskRateData', ['read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate', 'per_disk_rate']) DiskLatencyData = collections.namedtuple('DiskLatencyData', ['disk_latency', 'per_disk_latency']) DiskIOPSData = collections.namedtuple('DiskIOPSData', ['iops_count', 'per_disk_iops']) DiskInfoData = collections.namedtuple('DiskInfoData', ['capacity', 'allocation', 'physical', 'per_disk_info']) @six.add_metaclass(abc.ABCMeta) class _Base(pollsters.BaseComputePollster): DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:", "%s %s:", "read-requests=%d", "read-bytes=%d", "write-requests=%d", "write-bytes=%d", "errors=%d", ]) CACHE_KEY_DISK = 'diskio' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK, {}) if instance.id not in i_cache: r_bytes = 0 r_requests = 0 w_bytes = 0 w_requests = 0 per_device_read_bytes = {} per_device_read_requests = {} per_device_write_bytes = {} per_device_write_requests = {} for disk, info in inspector.inspect_disks(instance): LOG.debug(self.DISKIO_USAGE_MESSAGE, instance, disk.device, info.read_requests, info.read_bytes, info.write_requests, info.write_bytes, info.errors) r_bytes += info.read_bytes r_requests += info.read_requests w_bytes += info.write_bytes w_requests += info.write_requests # per disk data per_device_read_bytes[disk.device] = info.read_bytes per_device_read_requests[disk.device] = info.read_requests per_device_write_bytes[disk.device] = info.write_bytes per_device_write_requests[disk.device] = info.write_requests per_device_requests = { 'read_bytes': per_device_read_bytes, 'read_requests': per_device_read_requests, 'write_bytes': per_device_write_bytes, 'write_requests': per_device_write_requests, } i_cache[instance.id] = DiskIOData( r_bytes=r_bytes, r_requests=r_requests, w_bytes=w_bytes, w_requests=w_requests, per_disk_requests=per_device_requests, ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(instance, c_data): """Return one or more Sample.""" @staticmethod def _get_sample_read_and_write(instance, _name, _unit, c_data, _volume, _metadata): """Read / write Pollster and return one Sample""" 
return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_CUMULATIVE, unit=_unit, volume=getattr(c_data, _volume), additional_metadata={ 'device': c_data.per_disk_requests[_metadata].keys()}, )] @staticmethod def _get_samples_per_device(c_data, _attr, instance, _name, _unit): """Return one or more Samples for meter 'disk.device.*'""" samples = [] for disk, value in six.iteritems(c_data.per_disk_requests[_attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_CUMULATIVE, unit=_unit, volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def get_samples(self, manager, cache, resources): for instance in resources: instance_name = util.instance_name(instance) try: c_data = self._populate_cache( self.inspector, cache, instance, ) for s in self._get_samples(instance, c_data): yield s except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class ReadRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.read.requests', 'request', c_data, 'r_requests', 'read_requests') class PerDeviceReadRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'read_requests', instance, 'disk.device.read.requests', 'request') class ReadBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.read.bytes', 'B', c_data, 'r_bytes', 'read_bytes') class PerDeviceReadBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'read_bytes', instance, 'disk.device.read.bytes', 'B') class WriteRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.write.requests', 'request', c_data, 'w_requests', 'write_requests') class PerDeviceWriteRequestsPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'write_requests', instance, 'disk.device.write.requests', 'request') class WriteBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_sample_read_and_write( instance, 'disk.write.bytes', 'B', c_data, 'w_bytes', 'write_bytes') class PerDeviceWriteBytesPollster(_Base): def _get_samples(self, instance, c_data): return self._get_samples_per_device( c_data, 'write_bytes', instance, 'disk.device.write.bytes', 'B') @six.add_metaclass(abc.ABCMeta) class _DiskRatesPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_RATE = 'diskio-rate' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {}) if instance.id not in i_cache: r_bytes_rate = 0 r_requests_rate = 0 w_bytes_rate = 0 
w_requests_rate = 0 per_disk_r_bytes_rate = {} per_disk_r_requests_rate = {} per_disk_w_bytes_rate = {} per_disk_w_requests_rate = {} disk_rates = inspector.inspect_disk_rates( instance, self._inspection_duration) for disk, info in disk_rates: r_bytes_rate += info.read_bytes_rate r_requests_rate += info.read_requests_rate w_bytes_rate += info.write_bytes_rate w_requests_rate += info.write_requests_rate per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate per_disk_r_requests_rate[disk.device] = info.read_requests_rate per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate per_disk_w_requests_rate[disk.device] = ( info.write_requests_rate) per_disk_rate = { 'read_bytes_rate': per_disk_r_bytes_rate, 'read_requests_rate': per_disk_r_requests_rate, 'write_bytes_rate': per_disk_w_bytes_rate, 'write_requests_rate': per_disk_w_requests_rate, } i_cache[instance.id] = DiskRateData( r_bytes_rate, r_requests_rate, w_bytes_rate, w_requests_rate, per_disk_rate ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: try: disk_rates_info = self._populate_cache( self.inspector, cache, instance, ) for disk_rate in self._get_samples(instance, disk_rates_info): yield disk_rate except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) def _get_samples_per_device(self, disk_rates_info, _attr, instance, _name, _unit): """Return one or more Samples for meter 'disk.device.*'.""" samples = [] for disk, value in six.iteritems(disk_rates_info.per_disk_rate[ _attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit=_unit, volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def _get_sample_read_and_write(self, instance, _name, _unit, _element, _attr1, _attr2): """Read / write Pollster and return one Sample""" return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit=_unit, volume=getattr(_element, _attr1), additional_metadata={ 'device': getattr(_element, _attr2)[_attr1].keys()}, )] class ReadBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.read.bytes.rate', 'B/s', disk_rates_info, 'read_bytes_rate', 'per_disk_rate') class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'read_bytes_rate', instance, 'disk.device.read.bytes.rate', 'B/s') class ReadRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.read.requests.rate', 'requests/s', disk_rates_info, 'read_requests_rate', 'per_disk_rate') class 
PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'read_requests_rate', instance, 'disk.device.read.requests.rate', 'requests/s') class WriteBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.write.bytes.rate', 'B/s', disk_rates_info, 'write_bytes_rate', 'per_disk_rate') class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'write_bytes_rate', instance, 'disk.device.write.bytes.rate', 'B/s') class WriteRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_sample_read_and_write( instance, 'disk.write.requests.rate', 'requests/s', disk_rates_info, 'write_requests_rate', 'per_disk_rate') class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase): def _get_samples(self, instance, disk_rates_info): return self._get_samples_per_device( disk_rates_info, 'write_requests_rate', instance, 'disk.device.write.requests.rate', 'requests/s') @six.add_metaclass(abc.ABCMeta) class _DiskLatencyPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_LATENCY = 'disk-latency' def _populate_cache(self, inspector, cache, instance): return self._populate_cache_create( cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {}), instance, inspector, DiskLatencyData, 'inspect_disk_latency', 'disk_latency') @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): for instance in resources: try: disk_latency_info = self._populate_cache( self.inspector, cache, instance, ) for disk_latency in self._get_samples(instance, disk_latency_info): yield disk_latency except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class DiskLatencyPollster(_DiskLatencyPollsterBase): def _get_samples(self, instance, disk_latency_info): return [util.make_sample_from_instance( instance, name='disk.latency', type=sample.TYPE_GAUGE, unit='ms', volume=disk_latency_info.disk_latency / 1000 )] class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase): def _get_samples(self, instance, disk_latency_info): samples = [] for disk, value in six.iteritems(disk_latency_info.per_disk_latency[ 'disk_latency']): samples.append(util.make_sample_from_instance( instance, name='disk.device.latency', type=sample.TYPE_GAUGE, unit='ms', volume=value / 1000, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples class _DiskIOPSPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_IOPS = 'disk-iops' def _populate_cache(self, inspector, cache, instance): return self._populate_cache_create( cache.setdefault(self.CACHE_KEY_DISK_IOPS, {}), instance, inspector, DiskIOPSData, 'inspect_disk_iops', 'iops_count') @abc.abstractmethod def _get_samples(self, instance, disk_rates_info): """Return one or more Sample.""" def get_samples(self, manager, cache, resources): for instance in resources: try: disk_iops_info = self._populate_cache( self.inspector, cache, instance, ) for disk_iops in self._get_samples(instance, disk_iops_info): yield disk_iops except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' '%(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class DiskIOPSPollster(_DiskIOPSPollsterBase): def _get_samples(self, instance, disk_iops_info): return [util.make_sample_from_instance( instance, name='disk.iops', type=sample.TYPE_GAUGE, unit='count/s', volume=disk_iops_info.iops_count )] class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase): def _get_samples(self, instance, disk_iops_info): samples = [] for disk, value in six.iteritems(disk_iops_info.per_disk_iops[ 'iops_count']): samples.append(util.make_sample_from_instance( instance, name='disk.device.iops', type=sample.TYPE_GAUGE, unit='count/s', volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples @six.add_metaclass(abc.ABCMeta) class _DiskInfoPollsterBase(pollsters.BaseComputePollster): CACHE_KEY_DISK_INFO = 'diskinfo' def _populate_cache(self, inspector, cache, instance): i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {}) if instance.id not in i_cache: all_capacity = 0 all_allocation = 0 all_physical = 0 per_disk_capacity = {} per_disk_allocation = {} per_disk_physical = {} disk_info = inspector.inspect_disk_info( instance) for disk, info in disk_info: all_capacity += info.capacity all_allocation += info.allocation all_physical += info.physical per_disk_capacity[disk.device] = info.capacity per_disk_allocation[disk.device] = info.allocation per_disk_physical[disk.device] = info.physical per_disk_info = { 'capacity': per_disk_capacity, 'allocation': per_disk_allocation, 'physical': per_disk_physical, } i_cache[instance.id] = DiskInfoData( all_capacity, all_allocation, all_physical, per_disk_info ) return i_cache[instance.id] @abc.abstractmethod def _get_samples(self, instance, disk_info): """Return one or more Sample.""" def _get_samples_per_device(self, disk_info, _attr, instance, _name): """Return one or more Samples for meter 'disk.device.*'.""" samples = [] for disk, value in six.iteritems(disk_info.per_disk_info[_attr]): samples.append(util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit='B', volume=value, resource_id="%s-%s" % (instance.id, disk), additional_metadata={'disk_name': disk}, )) return samples def _get_samples_task(self, instance, _name, disk_info, _attr1, _attr2): """Return one or more Samples for meter 'disk.task.*'.""" return [util.make_sample_from_instance( instance, name=_name, type=sample.TYPE_GAUGE, unit='B', volume=getattr(disk_info, _attr1), additional_metadata={ 'device': disk_info.per_disk_info[_attr2].keys()}, )] def get_samples(self, manager, cache, resources): for instance in resources: try: disk_size_info = self._populate_cache( self.inspector, cache, instance, ) for disk_info in self._get_samples(instance, disk_size_info): yield disk_info except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: instance_name = util.instance_name(instance) LOG.exception(_('Ignoring instance %(name)s ' '(%(instance_id)s) : %(error)s') % ( {'name': instance_name, 'instance_id': instance.id, 'error': err})) class CapacityPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.capacity', disk_info, 'capacity', 'capacity') class PerDeviceCapacityPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'capacity', instance, 'disk.device.capacity') class AllocationPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.allocation', disk_info, 'allocation', 'allocation') class PerDeviceAllocationPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'allocation', instance, 'disk.device.allocation') class PhysicalPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_task( instance, 'disk.usage', disk_info, 'physical', 'physical') class PerDevicePhysicalPollster(_DiskInfoPollsterBase): def _get_samples(self, instance, disk_info): return self._get_samples_per_device( disk_info, 'physical', instance, 'disk.device.usage') ceilometer-6.0.0/ceilometer/compute/pollsters/net.py0000664000567000056710000001546012701406223024004 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
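# The disk pollsters above share a single hypervisor inspection per
# instance and polling interval through the ``cache`` dict passed to
# get_samples(): whichever pollster runs first fills
# cache[CACHE_KEY][instance.id] and its siblings reuse that entry.  A
# self-contained sketch of the pattern with a stand-in inspector
# (_FakeInspector and its byte counts are illustrative, not code from
# this tree):
import collections

_Totals = collections.namedtuple('_Totals', ['r_bytes', 'w_bytes'])


class _FakeInspector(object):
    calls = 0

    def inspect_disks(self, instance_id):
        self.calls += 1
        yield ('vda', _Totals(r_bytes=4096, w_bytes=8192))


def _populate_disk_cache(cache, inspector, instance_id):
    i_cache = cache.setdefault('diskio', {})
    if instance_id not in i_cache:  # inspect only on a cache miss
        r_bytes = w_bytes = 0
        for _disk, stats in inspector.inspect_disks(instance_id):
            r_bytes += stats.r_bytes
            w_bytes += stats.w_bytes
        i_cache[instance_id] = _Totals(r_bytes, w_bytes)
    return i_cache[instance_id]


if __name__ == '__main__':
    _cache, _fake = {}, _FakeInspector()
    _populate_disk_cache(_cache, _fake, 'vm-1')
    _populate_disk_cache(_cache, _fake, 'vm-1')
    assert _fake.calls == 1  # the second call reused the cached entry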
import copy from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) class _Base(pollsters.BaseComputePollster): NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d", "write-bytes=%d"]) @staticmethod def make_vnic_sample(instance, name, type, unit, volume, vnic_data): metadata = copy.copy(vnic_data) additional_metadata = dict(zip(metadata._fields, metadata)) if vnic_data.fref is not None: rid = vnic_data.fref additional_metadata['vnic_name'] = vnic_data.fref else: instance_name = util.instance_name(instance) rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name) additional_metadata['vnic_name'] = vnic_data.name return util.make_sample_from_instance( instance=instance, name=name, type=type, unit=unit, volume=volume, resource_id=rid, additional_metadata=additional_metadata ) CACHE_KEY_VNIC = 'vnics' def _get_vnic_info(self, inspector, instance): return inspector.inspect_vnics(instance) @staticmethod def _get_rx_info(info): return info.rx_bytes @staticmethod def _get_tx_info(info): return info.tx_bytes def _get_vnics_for_instance(self, cache, inspector, instance): i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {}) if instance.id not in i_cache: i_cache[instance.id] = list( self._get_vnic_info(inspector, instance) ) return i_cache[instance.id] def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: instance_name = util.instance_name(instance) LOG.debug('checking net info for instance %s', instance.id) try: vnics = self._get_vnics_for_instance( cache, self.inspector, instance, ) for vnic, info in vnics: LOG.debug(self.NET_USAGE_MESSAGE, instance_name, vnic.name, self._get_rx_info(info), self._get_tx_info(info)) yield self._get_sample(instance, vnic, info) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('%(inspector)s does not provide data for ' ' %(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) except Exception as err: LOG.exception(_('Ignoring instance %(name)s: %(error)s'), {'name': instance_name, 'error': err}) class _RateBase(_Base): NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:", "read-bytes-rate=%d", "write-bytes-rate=%d"]) CACHE_KEY_VNIC = 'vnic-rates' def _get_vnic_info(self, inspector, instance): return inspector.inspect_vnic_rates(instance, self._inspection_duration) @staticmethod def _get_rx_info(info): return info.rx_bytes_rate @staticmethod def _get_tx_info(info): return info.tx_bytes_rate class IncomingBytesPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=info.rx_bytes, vnic_data=vnic, ) class IncomingPacketsPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.packets', type=sample.TYPE_CUMULATIVE, unit='packet', volume=info.rx_packets, vnic_data=vnic, ) class OutgoingBytesPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.bytes', type=sample.TYPE_CUMULATIVE, unit='B', volume=info.tx_bytes, vnic_data=vnic, ) class OutgoingPacketsPollster(_Base): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.packets', type=sample.TYPE_CUMULATIVE, unit='packet', volume=info.tx_packets, vnic_data=vnic, ) class IncomingBytesRatePollster(_RateBase): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.incoming.bytes.rate', type=sample.TYPE_GAUGE, unit='B/s', volume=info.rx_bytes_rate, vnic_data=vnic, ) class OutgoingBytesRatePollster(_RateBase): def _get_sample(self, instance, vnic, info): return self.make_vnic_sample( instance, name='network.outgoing.bytes.rate', type=sample.TYPE_GAUGE, unit='B/s', volume=info.tx_bytes_rate, vnic_data=vnic, ) ceilometer-6.0.0/ceilometer/compute/pollsters/__init__.py0000664000567000056710000000513412701406223024752 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
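# make_vnic_sample() in net.py above keys a vNIC sample either on the
# libvirt filter ref, when one is present, or on an
# instance-name/instance-id/vnic-name composite.  A small sketch of that
# resource-id rule (the _Vnic tuple is a stand-in for
# virt_inspector.Interface):
import collections

_Vnic = collections.namedtuple('_Vnic', ['name', 'fref'])


def _vnic_resource_id(instance_name, instance_id, vnic):
    if vnic.fref is not None:
        return vnic.fref  # the filter ref doubles as the resource id
    return "%s-%s-%s" % (instance_name, instance_id, vnic.name)


if __name__ == '__main__':
    assert _vnic_resource_id('vm', 'uuid', _Vnic('tap0', 'nova-x')) == 'nova-x'
    assert _vnic_resource_id('vm', 'uuid', _Vnic('tap0', None)) == 'vm-uuid-tap0'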
import abc from oslo_utils import timeutils import six from ceilometer.agent import plugin_base from ceilometer.compute.virt import inspector as virt_inspector @six.add_metaclass(abc.ABCMeta) class BaseComputePollster(plugin_base.PollsterBase): def setup_environment(self): super(BaseComputePollster, self).setup_environment() # propagate exception from check_sanity self.inspector.check_sanity() @property def inspector(self): try: inspector = self._inspector except AttributeError: inspector = virt_inspector.get_hypervisor_inspector() BaseComputePollster._inspector = inspector return inspector @property def default_discovery(self): return 'local_instances' @staticmethod def _populate_cache_create(_i_cache, _instance, _inspector, _DiskData, _inspector_attr, _stats_attr): """Set up and return the per-instance cache entry.""" if _instance.id not in _i_cache: _data = 0 _per_device_data = {} disk_rates = getattr(_inspector, _inspector_attr)(_instance) for disk, stats in disk_rates: _data += getattr(stats, _stats_attr) _per_device_data[disk.device] = ( getattr(stats, _stats_attr)) _per_disk_data = { _stats_attr: _per_device_data } _i_cache[_instance.id] = _DiskData( _data, _per_disk_data ) return _i_cache[_instance.id] def _record_poll_time(self): """Method records current time as the poll time. :return: time in seconds since the last poll time was recorded """ current_time = timeutils.utcnow() duration = None if hasattr(self, '_last_poll_time'): duration = timeutils.delta_seconds(self._last_poll_time, current_time) self._last_poll_time = current_time return duration ceilometer-6.0.0/ceilometer/compute/pollsters/memory.py0000664000567000056710000001257412701406223024531 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _, _LE, _LW from ceilometer import sample LOG = log.getLogger(__name__) class MemoryUsagePollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking memory usage for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_usage( instance, self._inspection_duration) LOG.debug("MEMORY USAGE: %(instance)s %(usage)f", {'instance': instance, 'usage': memory_info.usage}) yield util.make_sample_from_instance( instance, name='memory.usage', type=sample.TYPE_GAUGE, unit='MB', volume=memory_info.usage, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it.
LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s'), {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining Memory Usage is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, 'e': err}) class MemoryResidentPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking resident memory for instance %s', instance.id) try: memory_info = self.inspector.inspect_memory_resident( instance, self._inspection_duration) LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f", {'instance': instance, 'resident': memory_info.resident}) yield util.make_sample_from_instance( instance, name='memory.resident', type=sample.TYPE_GAUGE, unit='MB', volume=memory_info.resident, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s'), {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining Resident Memory is not implemented' ' for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_LE('Could not get Resident Memory Usage for ' '%(id)s: %(e)s'), {'id': instance.id, 'e': err}) ceilometer-6.0.0/ceilometer/compute/pollsters/instance.py0000664000567000056710000000215112701406223025013 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
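# MemoryUsagePollster and MemoryResidentPollster above hand the value of
# _record_poll_time() to the inspector as the averaging window: None on
# the first poll, and the seconds elapsed since the previous poll
# afterwards.  A runnable sketch of that timing logic (_PollTimer is
# illustrative only):
import time


class _PollTimer(object):
    def record(self):
        now = time.time()
        last = getattr(self, '_last_poll_time', None)
        self._last_poll_time = now
        # No previous poll means there is no window to average over yet.
        return None if last is None else now - last


if __name__ == '__main__':
    timer = _PollTimer()
    assert timer.record() is None   # first poll
    time.sleep(0.01)
    assert timer.record() > 0       # later polls return the elapsed window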
from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer import sample class InstancePollster(pollsters.BaseComputePollster): @staticmethod def get_samples(manager, cache, resources): for instance in resources: yield util.make_sample_from_instance( instance, name='instance', type=sample.TYPE_GAUGE, unit='instance', volume=1, ) ceilometer-6.0.0/ceilometer/compute/pollsters/util.py0000664000567000056710000000655312701406223024176 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from ceilometer.compute import util as compute_util from ceilometer import sample INSTANCE_PROPERTIES = [ # Identity properties 'reservation_id', # Type properties 'architecture', 'OS-EXT-AZ:availability_zone', 'kernel_id', 'os_type', 'ramdisk_id', ] def _get_metadata_from_object(instance): """Return a metadata dictionary for the instance.""" instance_type = instance.flavor['name'] if instance.flavor else None metadata = { 'display_name': instance.name, 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''), 'instance_id': instance.id, 'instance_type': instance_type, 'host': instance.hostId, 'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''), 'flavor': instance.flavor, 'status': instance.status.lower(), 'state': getattr(instance, 'OS-EXT-STS:vm_state', u''), } # Image properties if instance.image: metadata['image'] = instance.image metadata['image_ref'] = instance.image['id'] # Images that come through the conductor API in the nova notifier # plugin will not have links. 
if instance.image.get('links'): metadata['image_ref_url'] = instance.image['links'][0]['href'] else: metadata['image_ref_url'] = None else: metadata['image'] = None metadata['image_ref'] = None metadata['image_ref_url'] = None for name in INSTANCE_PROPERTIES: if hasattr(instance, name): metadata[name] = getattr(instance, name) metadata['vcpus'] = instance.flavor['vcpus'] metadata['memory_mb'] = instance.flavor['ram'] metadata['disk_gb'] = instance.flavor['disk'] metadata['ephemeral_gb'] = instance.flavor['ephemeral'] metadata['root_gb'] = (int(metadata['disk_gb']) - int(metadata['ephemeral_gb'])) return compute_util.add_reserved_user_metadata(instance.metadata, metadata) def make_sample_from_instance(instance, name, type, unit, volume, resource_id=None, additional_metadata=None): additional_metadata = additional_metadata or {} resource_metadata = _get_metadata_from_object(instance) resource_metadata.update(additional_metadata) return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=instance.user_id, project_id=instance.tenant_id, resource_id=resource_id or instance.id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, ) def instance_name(instance): """Shortcut to get instance name.""" return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None) ceilometer-6.0.0/ceilometer/compute/pollsters/cpu.py0000664000567000056710000001012412701406223023775 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import ceilometer from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ from ceilometer import sample LOG = log.getLogger(__name__) class CPUPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): for instance in resources: LOG.debug('checking instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpus(instance) LOG.debug("CPUTIME USAGE: %(instance)s %(time)d", {'instance': instance, 'time': cpu_info.time}) cpu_num = {'cpu_number': cpu_info.number} yield util.make_sample_from_instance( instance, name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume=cpu_info.time, additional_metadata=cpu_num, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting samples of %(pollster)s: %(exc)s', {'instance_id': instance.id, 'pollster': self.__class__.__name__, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
LOG.debug('Obtaining CPU time is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('could not get CPU time for %(id)s: %(e)s'), {'id': instance.id, 'e': err}) class CPUUtilPollster(pollsters.BaseComputePollster): def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: LOG.debug('Checking CPU util for instance %s', instance.id) try: cpu_info = self.inspector.inspect_cpu_util( instance, self._inspection_duration) LOG.debug("CPU UTIL: %(instance)s %(util)d", {'instance': instance, 'util': cpu_info.util}) yield util.make_sample_from_instance( instance, name='cpu_util', type=sample.TYPE_GAUGE, unit='%', volume=cpu_info.util, ) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('Obtaining CPU Util is not implemented for %s', self.inspector.__class__.__name__) except Exception as err: LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'), {'id': instance.id, 'e': err}) ceilometer-6.0.0/ceilometer/compute/__init__.py0000664000567000056710000000000012701406223022706 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/0000775000567000056710000000000012701406364021601 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/inspector.py0000664000567000056710000002310612701406223024155 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hypervisors.""" import collections from oslo_config import cfg from oslo_log import log from stevedore import driver import ceilometer from ceilometer.i18n import _ OPTS = [ cfg.StrOpt('hypervisor_inspector', default='libvirt', help='Inspector to use for inspecting the hypervisor layer. ' 'Known inspectors are libvirt, hyperv, vmware, xenapi ' 'and powervm.'), ] cfg.CONF.register_opts(OPTS) LOG = log.getLogger(__name__) # Named tuple representing instances. # # name: the name of the instance # uuid: the UUID associated with the instance # Instance = collections.namedtuple('Instance', ['name', 'UUID']) # Named tuple representing CPU statistics. # # number: number of CPUs # time: cumulative CPU time # CPUStats = collections.namedtuple('CPUStats', ['number', 'time']) # Named tuple representing CPU Utilization statistics. # # util: CPU utilization in percentage # CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util']) # Named tuple representing Memory usage statistics. # # usage: Amount of memory used # MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage']) # Named tuple representing Resident Memory usage statistics. 
# # resident: Amount of resident memory # MemoryResidentStats = collections.namedtuple('MemoryResidentStats', ['resident']) # Named tuple representing vNICs. # # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # Interface = collections.namedtuple('Interface', ['name', 'mac', 'fref', 'parameters']) # Named tuple representing vNIC statistics. # # rx_bytes: number of received bytes # rx_packets: number of received packets # tx_bytes: number of transmitted bytes # tx_packets: number of transmitted packets # InterfaceStats = collections.namedtuple('InterfaceStats', ['rx_bytes', 'rx_packets', 'tx_bytes', 'tx_packets']) # Named tuple representing vNIC rate statistics. # # rx_bytes_rate: rate of received bytes # tx_bytes_rate: rate of transmitted bytes # InterfaceRateStats = collections.namedtuple('InterfaceRateStats', ['rx_bytes_rate', 'tx_bytes_rate']) # Named tuple representing disks. # # device: the device name for the disk # Disk = collections.namedtuple('Disk', ['device']) # Named tuple representing disk statistics. # # read_bytes: number of bytes read # read_requests: number of read operations # write_bytes: number of bytes written # write_requests: number of write operations # errors: number of errors # DiskStats = collections.namedtuple('DiskStats', ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors']) # Named tuple representing disk rate statistics. # # read_bytes_rate: number of bytes read per second # read_requests_rate: number of read operations per second # write_bytes_rate: number of bytes written per second # write_requests_rate: number of write operations per second # DiskRateStats = collections.namedtuple('DiskRateStats', ['read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate']) # Named tuple representing disk latency statistics. # # disk_latency: average disk latency # DiskLatencyStats = collections.namedtuple('DiskLatencyStats', ['disk_latency']) # Named tuple representing disk iops statistics. # # iops: number of iops per second # DiskIOPSStats = collections.namedtuple('DiskIOPSStats', ['iops_count']) # Named tuple representing disk Information. # # capacity: capacity of the disk # allocation: allocation of the disk # physical: usage of the disk DiskInfo = collections.namedtuple('DiskInfo', ['capacity', 'allocation', 'physical']) # Exception types # class InspectorException(Exception): def __init__(self, message=None): super(InspectorException, self).__init__(message) class InstanceNotFoundException(InspectorException): pass class InstanceShutOffException(InspectorException): pass class NoDataException(InspectorException): pass class NoSanityException(InspectorException): pass # Main virt inspector abstraction layering over the hypervisor API. # class Inspector(object): def check_sanity(self): """Check the sanity of hypervisor inspector. Each subclass could overwrite it to throw any exception when detecting mis-configured inspector """ pass def inspect_cpus(self, instance): """Inspect the CPU statistics for an instance. :param instance: the target instance :return: the number of CPUs and cumulative CPU time """ raise ceilometer.NotImplementedError def inspect_cpu_util(self, instance, duration=None): """Inspect the CPU Utilization (%) for an instance. 
:param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the percentage of CPU utilization """ raise ceilometer.NotImplementedError def inspect_vnics(self, instance): """Inspect the vNIC statistics for an instance. :param instance: the target instance :return: for each vNIC, the number of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_vnic_rates(self, instance, duration=None): """Inspect the vNIC rate statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the rate of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_disks(self, instance): """Inspect the disk statistics for an instance. :param instance: the target instance :return: for each disk, the number of bytes & operations read and written, and the error count """ raise ceilometer.NotImplementedError def inspect_memory_usage(self, instance, duration=None): """Inspect the memory usage statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the amount of memory used """ raise ceilometer.NotImplementedError def inspect_memory_resident(self, instance, duration=None): """Inspect the resident memory statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the amount of resident memory """ raise ceilometer.NotImplementedError def inspect_disk_rates(self, instance, duration=None): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written per second, with the error count """ raise ceilometer.NotImplementedError def inspect_disk_latency(self, instance): """Inspect the disk latency statistics for an instance. :param instance: the target instance :return: for each disk, the average disk latency """ raise ceilometer.NotImplementedError def inspect_disk_iops(self, instance): """Inspect the disk iops statistics for an instance. :param instance: the target instance :return: for each disk, the number of iops per second """ raise ceilometer.NotImplementedError def inspect_disk_info(self, instance): """Inspect the disk information for an instance.
:param instance: the target instance :return: for each disk, capacity, allocation and usage """ raise ceilometer.NotImplementedError def get_hypervisor_inspector(): try: namespace = 'ceilometer.compute.virt' mgr = driver.DriverManager(namespace, cfg.CONF.hypervisor_inspector, invoke_on_load=True) return mgr.driver except ImportError as e: LOG.error(_("Unable to load the hypervisor inspector: %s") % e) return Inspector() ceilometer-6.0.0/ceilometer/compute/virt/__init__.py0000664000567000056710000000000012701406223023672 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/libvirt/0000775000567000056710000000000012701406364023254 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/libvirt/inspector.py0000664000567000056710000002215312701406223025631 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for libvirt.""" from lxml import etree from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import six from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ libvirt = None LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('libvirt_type', default='kvm', choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'], help='Libvirt domain type.'), cfg.StrOpt('libvirt_uri', default='', help='Override the default libvirt URI ' '(which is dependent on libvirt_type).'), ] CONF = cfg.CONF CONF.register_opts(OPTS) def retry_on_disconnect(function): def decorator(self, *args, **kwargs): try: return function(self, *args, **kwargs) except libvirt.libvirtError as e: if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): LOG.debug('Connection to libvirt broken') self.connection = None return function(self, *args, **kwargs) else: raise return decorator class LibvirtInspector(virt_inspector.Inspector): per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///') def __init__(self): self.uri = self._get_uri() self.connection = None def _get_uri(self): return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type, 'qemu:///system') def _get_connection(self): if not self.connection: global libvirt if libvirt is None: libvirt = __import__('libvirt') LOG.debug('Connecting to libvirt: %s', self.uri) self.connection = libvirt.openReadOnly(self.uri) return self.connection def check_sanity(self): if not self._get_connection(): raise virt_inspector.NoSanityException() @retry_on_disconnect def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) try: return self._get_connection().lookupByUUIDString(instance.id) except Exception as ex: if not libvirt or not isinstance(ex, libvirt.libvirtError): raise virt_inspector.InspectorException(six.text_type(ex)) error_code = ex.get_error_code() if (error_code in
(libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): raise msg = _("Error from libvirt while looking up instance " "<name=%(name)s, id=%(id)s>: " "[Error Code %(error_code)s] " "%(ex)s") % {'name': instance_name, 'id': instance.id, 'error_code': error_code, 'ex': ex} raise virt_inspector.InstanceNotFoundException(msg) def inspect_cpus(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) dom_info = domain.info() return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4]) def _get_domain_not_shut_off_or_raise(self, instance): instance_name = util.instance_name(instance) domain = self._lookup_by_uuid(instance) state = domain.info()[0] if state == libvirt.VIR_DOMAIN_SHUTOFF: msg = _('Failed to inspect data of instance ' '<name=%(name)s, id=%(id)s>, ' 'domain state is SHUTOFF.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.InstanceShutOffException(msg) return domain def inspect_vnics(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for iface in tree.findall('devices/interface'): target = iface.find('target') if target is not None: name = target.get('dev') else: continue mac = iface.find('mac') if mac is not None: mac_address = mac.get('address') else: continue fref = iface.find('filterref') if fref is not None: fref = fref.get('filter') params = dict((p.get('name').lower(), p.get('value')) for p in iface.findall('filterref/parameter')) interface = virt_inspector.Interface(name=name, mac=mac_address, fref=fref, parameters=params) dom_stats = domain.interfaceStats(name) stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0], rx_packets=dom_stats[1], tx_bytes=dom_stats[4], tx_packets=dom_stats[5]) yield (interface, stats) def inspect_disks(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for device in filter( bool, [target.get("dev") for target in tree.findall('devices/disk/target')]): disk = virt_inspector.Disk(device=device) block_stats = domain.blockStats(device) stats = virt_inspector.DiskStats(read_requests=block_stats[0], read_bytes=block_stats[1], write_requests=block_stats[2], write_bytes=block_stats[3], errors=block_stats[4]) yield (disk, stats) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) domain = self._get_domain_not_shut_off_or_raise(instance) try: memory_stats = domain.memoryStats() if (memory_stats and memory_stats.get('available') and memory_stats.get('unused')): memory_used = (memory_stats.get('available') - memory_stats.get('unused')) # Stat provided from libvirt is in KB, converting it to MB. memory_used = memory_used / units.Ki return virt_inspector.MemoryUsageStats(usage=memory_used) else: msg = _('Failed to inspect memory usage of instance ' '<name=%(name)s, id=%(id)s>, ' 'cannot get info from libvirt.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.NoDataException(msg) # memoryStats might raise an exception if the method is not supported # by the underlying hypervisor being used by libvirt.
except libvirt.libvirtError as e: msg = _('Failed to inspect memory usage of %(instance_uuid)s, ' 'can not get info from libvirt: %(error)s') % { 'instance_uuid': instance.id, 'error': e} raise virt_inspector.NoDataException(msg) def inspect_disk_info(self, instance): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for device in filter( bool, [target.get("dev") for target in tree.findall('devices/disk/target')]): disk = virt_inspector.Disk(device=device) block_info = domain.blockInfo(device) info = virt_inspector.DiskInfo(capacity=block_info[0], allocation=block_info[1], physical=block_info[2]) yield (disk, info) def inspect_memory_resident(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) memory = domain.memoryStats()['rss'] / units.Ki return virt_inspector.MemoryResidentStats(resident=memory) ceilometer-6.0.0/ceilometer/compute/virt/libvirt/__init__.py0000664000567000056710000000000012701406223025345 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/vmware/0000775000567000056710000000000012701406364023102 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/vmware/vsphere_operations.py0000664000567000056710000002371712701406223027377 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_vmware import vim_util PERF_MANAGER_TYPE = "PerformanceManager" PERF_COUNTER_PROPERTY = "perfCounter" VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value' # ESXi Servers sample performance data every 20 seconds. 20-second interval # data is called instance data or real-time data. To retrieve instance data, # we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId" # property. In that case the "QueryPerf" method operates as a raw data feed # that bypasses the vCenter database and instead retrieves performance data # from an ESXi host. # The following value is time interval for real-time performance stats # in seconds and it is not configurable. VC_REAL_TIME_SAMPLING_INTERVAL = 20 class VsphereOperations(object): """Class to invoke vSphere APIs calls. vSphere APIs calls are required by various pollsters, collecting data from VMware infrastructure. """ def __init__(self, api_session, max_objects): self._api_session = api_session self._max_objects = max_objects # Mapping between "VM's Nova instance Id" -> "VM's MOID" # In case a VM is deployed by Nova, then its name is instance ID. # So this map essentially has VM names as keys. 
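# An illustrative entry (the UUID and MOID below are assumed, not taken
# from a real vCenter):
#   {'4ec2b0b3-0000-0000-0000-nova-instance': 'vm-1234'}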
self._vm_moid_lookup_map = {} # Mapping from full name -> ID, for VC Performance counters self._perf_counter_id_lookup_map = None def _init_vm_moid_lookup_map(self): session = self._api_session result = session.invoke_api(vim_util, "get_objects", session.vim, "VirtualMachine", self._max_objects, [VM_INSTANCE_ID_PROPERTY], False) while result: for vm_object in result.objects: vm_moid = vm_object.obj.value # propSet will be set only if the server provides value if hasattr(vm_object, 'propSet') and vm_object.propSet: vm_instance_id = vm_object.propSet[0].val if vm_instance_id: self._vm_moid_lookup_map[vm_instance_id] = vm_moid result = session.invoke_api(vim_util, "continue_retrieval", session.vim, result) def get_vm_moid(self, vm_instance_id): """Method returns VC MOID of the VM by its NOVA instance ID.""" if vm_instance_id not in self._vm_moid_lookup_map: self._init_vm_moid_lookup_map() return self._vm_moid_lookup_map.get(vm_instance_id, None) def _init_perf_counter_id_lookup_map(self): # Query details of all the performance counters from VC session = self._api_session client_factory = session.vim.client.factory perf_manager = session.vim.service_content.perfManager prop_spec = vim_util.build_property_spec( client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY]) obj_spec = vim_util.build_object_spec( client_factory, perf_manager, None) filter_spec = vim_util.build_property_filter_spec( client_factory, [prop_spec], [obj_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = 1 prop_collector = session.vim.service_content.propertyCollector result = session.invoke_api(session.vim, "RetrievePropertiesEx", prop_collector, specSet=[filter_spec], options=options) perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo # Extract the counter Id for each counter and populate the map self._perf_counter_id_lookup_map = {} for perf_counter_info in perf_counter_infos: counter_group = perf_counter_info.groupInfo.key counter_name = perf_counter_info.nameInfo.key counter_rollup_type = perf_counter_info.rollupType counter_id = perf_counter_info.key counter_full_name = (counter_group + ":" + counter_name + ":" + counter_rollup_type) self._perf_counter_id_lookup_map[counter_full_name] = counter_id def get_perf_counter_id(self, counter_full_name): """Method returns the ID of VC performance counter by its full name. A VC performance counter is uniquely identified by the tuple {'Group Name', 'Counter Name', 'Rollup Type'}. It will have an id - counter ID (changes from one VC to another), which is required to query performance stats from that VC. This method returns the ID for a counter, assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'. """ if not self._perf_counter_id_lookup_map: self._init_perf_counter_id_lookup_map() return self._perf_counter_id_lookup_map[counter_full_name] # TODO(akhils@vmware.com) Move this method to common library # when it gets checked-in def query_vm_property(self, vm_moid, property_name): """Method returns the value of specified property for a VM. :param vm_moid: moid of the VM whose property is to be queried :param property_name: path of the property """ vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine") session = self._api_session return session.invoke_api(vim_util, "get_object_property", session.vim, vm_mobj, property_name) def query_vm_aggregate_stats(self, vm_moid, counter_id, duration): """Method queries the aggregated real-time stat value for a VM. This method should be used for aggregate counters. 
:param vm_moid: moid of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: the aggregated stats value for the counter """ # For aggregate counters, device_name should be "" stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration) # Performance manager provides the aggregated stats value # with device name -> None return stats.get(None, 0) def query_vm_device_stats(self, vm_moid, counter_id, duration): """Method queries the real-time stat values for a VM, for all devices. This method should be used for device(non-aggregate) counters. :param vm_moid: moid of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ # For device counters, device_name should be "*" to get stat values # for all devices. stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration) # For some device counters, in addition to the per device value # the Performance manager also returns the aggregated value. # Just to be consistent, deleting the aggregated value if present. stats.pop(None, None) return stats def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration): """Method queries the real-time stat values for a VM. :param vm_moid: moid of the VM for which stats are needed :param counter_id: id of the perf counter in VC :param device_name: name of the device for which stats are to be queried. For aggregate counters pass empty string (""). For device counters pass "*", if stats are required over all devices. :param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ session = self._api_session client_factory = session.vim.client.factory # Construct the QuerySpec metric_id = client_factory.create('ns0:PerfMetricId') metric_id.counterId = counter_id metric_id.instance = device_name query_spec = client_factory.create('ns0:PerfQuerySpec') query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine") query_spec.metricId = [metric_id] query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL # We query all samples which are applicable over the specified duration samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL) if duration and duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1) query_spec.maxSample = samples_cnt perf_manager = session.vim.service_content.perfManager perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager, querySpec=[query_spec]) stat_values = {} if perf_stats: entity_metric = perf_stats[0] sample_infos = entity_metric.sampleInfo if len(sample_infos) > 0: for metric_series in entity_metric.value: # Take the average of all samples to improve the accuracy # of the stat value stat_value = float(sum(metric_series.value)) / samples_cnt device_id = metric_series.id.instance stat_values[device_id] = stat_value return stat_values ceilometer-6.0.0/ceilometer/compute/virt/vmware/inspector.py0000664000567000056710000002002012701406223025446 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for VMware vSphere""" from oslo_config import cfg from oslo_utils import units from oslo_vmware import api import six from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import vsphere_operations from ceilometer.i18n import _ opt_group = cfg.OptGroup(name='vmware', title='Options for VMware') OPTS = [ cfg.StrOpt('host_ip', default='', help='IP address of the VMware vSphere host.'), cfg.PortOpt('host_port', default=443, help='Port of the VMware vSphere host.'), cfg.StrOpt('host_username', default='', help='Username of VMware vSphere.'), cfg.StrOpt('host_password', default='', help='Password of VMware vSphere.', secret=True), cfg.StrOpt('ca_file', help='CA bundle file to use in verifying the vCenter server ' 'certificate.'), cfg.BoolOpt('insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. This option is ignored if ' '"ca_file" is set.'), cfg.IntOpt('api_retry_count', default=10, help='Number of times a VMware vSphere API may be retried.'), cfg.FloatOpt('task_poll_interval', default=0.5, help='Sleep time in seconds for polling an ongoing async ' 'task.'), cfg.StrOpt('wsdl_location', help='Optional vim service WSDL location ' 'e.g http://<server>/vimService.wsdl. ' 'Optional over-ride to default location for bug ' 'work-arounds.'), ] cfg.CONF.register_group(opt_group) cfg.CONF.register_opts(OPTS, group=opt_group) VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average' VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average' VC_NETWORK_RX_COUNTER = 'net:received:average' VC_NETWORK_TX_COUNTER = 'net:transmitted:average' VC_DISK_READ_RATE_CNTR = "disk:read:average" VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average" VC_DISK_WRITE_RATE_CNTR = "disk:write:average" VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average" def get_api_session(): api_session = api.VMwareAPISession( cfg.CONF.vmware.host_ip, cfg.CONF.vmware.host_username, cfg.CONF.vmware.host_password, cfg.CONF.vmware.api_retry_count, cfg.CONF.vmware.task_poll_interval, wsdl_loc=cfg.CONF.vmware.wsdl_location, port=cfg.CONF.vmware.host_port, cacert=cfg.CONF.vmware.ca_file, insecure=cfg.CONF.vmware.insecure) return api_session class VsphereInspector(virt_inspector.Inspector): def __init__(self): super(VsphereInspector, self).__init__() self._ops = vsphere_operations.VsphereOperations( get_api_session(), 1000) def inspect_cpu_util(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if vm_moid is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) cpu_util_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_CPU_CONSUMED_CNTR) cpu_util = self._ops.query_vm_aggregate_stats( vm_moid, cpu_util_counter_id, duration) # For this counter vSphere returns values scaled-up by 100, since the # corresponding API can't return decimals, but only longs. # For e.g. if the utilization is 12.34%, the value returned is 1234. # Hence, dividing by 100.
cpu_util = cpu_util / 100 return virt_inspector.CPUUtilStats(util=cpu_util) def inspect_vnic_rates(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if not vm_moid: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) vnic_stats = {} vnic_ids = set() for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER): net_counter_id = self._ops.get_perf_counter_id(net_counter) vnic_id_to_stats_map = self._ops.query_vm_device_stats( vm_moid, net_counter_id, duration) vnic_stats[net_counter] = vnic_id_to_stats_map vnic_ids.update(six.iterkeys(vnic_id_to_stats_map)) # Stats provided from vSphere are in KB/s, converting it to B/s. for vnic_id in vnic_ids: rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER] .get(vnic_id, 0) * units.Ki) tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER] .get(vnic_id, 0) * units.Ki) stats = virt_inspector.InterfaceRateStats(rx_bytes_rate, tx_bytes_rate) interface = virt_inspector.Interface( name=vnic_id, mac=None, fref=None, parameters=None) yield (interface, stats) def inspect_memory_usage(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if vm_moid is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) mem_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_MEMORY_CONSUMED_CNTR) memory = self._ops.query_vm_aggregate_stats( vm_moid, mem_counter_id, duration) # Stat provided from vSphere is in KB, converting it to MB. memory = memory / units.Ki return virt_inspector.MemoryUsageStats(usage=memory) def inspect_disk_rates(self, instance, duration=None): vm_moid = self._ops.get_vm_moid(instance.id) if not vm_moid: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) disk_stats = {} disk_ids = set() disk_counters = [ VC_DISK_READ_RATE_CNTR, VC_DISK_READ_REQUESTS_RATE_CNTR, VC_DISK_WRITE_RATE_CNTR, VC_DISK_WRITE_REQUESTS_RATE_CNTR ] for disk_counter in disk_counters: disk_counter_id = self._ops.get_perf_counter_id(disk_counter) disk_id_to_stat_map = self._ops.query_vm_device_stats( vm_moid, disk_counter_id, duration) disk_stats[disk_counter] = disk_id_to_stat_map disk_ids.update(six.iterkeys(disk_id_to_stat_map)) for disk_id in disk_ids: def stat_val(counter_name): return disk_stats[counter_name].get(disk_id, 0) disk = virt_inspector.Disk(device=disk_id) # Stats provided from vSphere are in KB/s, converting it to B/s. disk_rate_info = virt_inspector.DiskRateStats( read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki, read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR), write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki, write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR) ) yield(disk, disk_rate_info) ceilometer-6.0.0/ceilometer/compute/virt/vmware/__init__.py0000664000567000056710000000000012701406223025173 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/xenapi/0000775000567000056710000000000012701406364023065 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/xenapi/inspector.py0000664000567000056710000001736412701406223025452 0ustar jenkinsjenkins00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for XenAPI.""" from oslo_config import cfg from oslo_utils import units import six.moves.urllib.parse as urlparse try: import XenAPI as api except ImportError: api = None from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ opt_group = cfg.OptGroup(name='xenapi', title='Options for XenAPI') OPTS = [ cfg.StrOpt('connection_url', help='URL for connection to XenServer/Xen Cloud Platform.'), cfg.StrOpt('connection_username', default='root', help='Username for connection to XenServer/Xen Cloud ' 'Platform.'), cfg.StrOpt('connection_password', help='Password for connection to XenServer/Xen Cloud Platform.', secret=True), ] CONF = cfg.CONF CONF.register_group(opt_group) CONF.register_opts(OPTS, group=opt_group) class XenapiException(virt_inspector.InspectorException): pass def swap_xapi_host(url, host_addr): """Replace the XenServer address present in 'url' with 'host_addr'.""" temp_url = urlparse.urlparse(url) # The connection URL is served by XAPI and doesn't support having a # path for the connection url after the port. And username/password # will be pass separately. So the URL like "http://abc:abc@abc:433/abc" # should not appear for XAPI case. temp_netloc = temp_url.netloc.replace(temp_url.hostname, '%s' % host_addr) replaced = temp_url._replace(netloc=temp_netloc) return urlparse.urlunparse(replaced) def get_api_session(): if not api: raise ImportError(_('XenAPI not installed')) url = CONF.xenapi.connection_url username = CONF.xenapi.connection_username password = CONF.xenapi.connection_password if not url or password is None: raise XenapiException(_('Must specify connection_url, and ' 'connection_password to use')) try: session = (api.xapi_local() if url == 'unix://local' else api.Session(url)) session.login_with_password(username, password) except api.Failure as e: if e.details[0] == 'HOST_IS_SLAVE': master = e.details[1] url = swap_xapi_host(url, master) try: session = api.Session(url) session.login_with_password(username, password) except api.Failure as es: raise XenapiException(_('Could not connect slave host: %s ') % es.details[0]) else: msg = _("Could not connect to XenAPI: %s") % e.details[0] raise XenapiException(msg) return session class XenapiInspector(virt_inspector.Inspector): def __init__(self): super(XenapiInspector, self).__init__() self.session = get_api_session() def _get_host_ref(self): """Return the xenapi host on which nova-compute runs on.""" return self.session.xenapi.session.get_this_host(self.session.handle) def _call_xenapi(self, method, *args): return self.session.xenapi_request(method, args) def _lookup_by_name(self, instance_name): vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name) n = len(vm_refs) if n == 0: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in XenServer') % instance_name) elif n > 1: raise XenapiException( _('Multiple VM %s found in XenServer') % instance_name) else: return vm_refs[0] def inspect_cpu_util(self, instance, duration=None): instance_name = util.instance_name(instance) 
vm_ref = self._lookup_by_name(instance_name) metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) metrics_rec = self._call_xenapi("VM_metrics.get_record", metrics_ref) vcpus_number = metrics_rec['VCPUs_number'] vcpus_utils = metrics_rec['VCPUs_utilisation'] if len(vcpus_utils) == 0: msg = _("Could not get VM %s CPU Utilization") % instance_name raise XenapiException(msg) utils = 0.0 for num in range(int(vcpus_number)): utils += vcpus_utils.get(str(num)) utils = utils / int(vcpus_number) * 100 return virt_inspector.CPUUtilStats(util=utils) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) metrics_rec = self._call_xenapi("VM_metrics.get_record", metrics_ref) # Stat provided from XenServer is in B, converting it to MB. memory = int(metrics_rec['memory_actual']) / units.Mi return virt_inspector.MemoryUsageStats(usage=memory) def inspect_vnic_rates(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref) if vif_refs: for vif_ref in vif_refs: vif_rec = self._call_xenapi("VIF.get_record", vif_ref) vif_metrics_ref = self._call_xenapi( "VIF.get_metrics", vif_ref) vif_metrics_rec = self._call_xenapi( "VIF_metrics.get_record", vif_metrics_ref) interface = virt_inspector.Interface( name=vif_rec['uuid'], mac=vif_rec['MAC'], fref=None, parameters=None) rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate) yield (interface, stats) def inspect_disk_rates(self, instance, duration=None): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref) if vbd_refs: for vbd_ref in vbd_refs: vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref) vbd_metrics_ref = self._call_xenapi("VBD.get_metrics", vbd_ref) vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record", vbd_metrics_ref) disk = virt_inspector.Disk(device=vbd_rec['device']) # Stats provided from XenServer are in KB/s, # converting it to B/s. read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki disk_rate_info = virt_inspector.DiskRateStats( read_bytes_rate=read_rate, read_requests_rate=0, write_bytes_rate=write_rate, write_requests_rate=0) yield(disk, disk_rate_info) ceilometer-6.0.0/ceilometer/compute/virt/xenapi/__init__.py0000664000567000056710000000000012701406223025156 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/hyperv/0000775000567000056710000000000012701406364023116 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/virt/hyperv/inspector.py0000664000567000056710000001363212701406223025475 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for Hyper-V""" import collections import functools import sys from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_utils import units import six from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector def convert_exceptions(function, exception_map): expected_exceptions = tuple(exception_map.keys()) @functools.wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except expected_exceptions as ex: # exception might be a subclass of an expected exception. for expected in expected_exceptions: if isinstance(ex, expected): raised_exception = exception_map[expected] break exc_info = sys.exc_info() # NOTE(claudiub): Python 3 raises the exception object given as # the second argument in six.reraise. # The original message will be maintained by passing the original # exception. exc = raised_exception(six.text_type(exc_info[1])) six.reraise(raised_exception, exc, exc_info[2]) return wrapper def decorate_all_methods(decorator, *args, **kwargs): def decorate(cls): for attr in cls.__dict__: class_member = getattr(cls, attr) if callable(class_member): setattr(cls, attr, decorator(class_member, *args, **kwargs)) return cls return decorate exception_conversion_map = collections.OrderedDict([ # NOTE(claudiub): order should be from the most specialized exception type # to the most generic exception type. # (expected_exception, converted_exception) (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException), (os_win_exc.OSWinException, virt_inspector.InspectorException), ]) # NOTE(claudiub): the purpose of the decorator below is to prevent any # os_win exceptions (subclasses of OSWinException) to leak outside of the # HyperVInspector. @decorate_all_methods(convert_exceptions, exception_conversion_map) class HyperVInspector(virt_inspector.Inspector): def __init__(self): super(HyperVInspector, self).__init__() self._utils = utilsfactory.get_metricsutils() self._host_max_cpu_clock = self._compute_host_max_cpu_clock() def _compute_host_max_cpu_clock(self): hostutils = utilsfactory.get_hostutils() # host's number of CPUs and CPU clock speed will not change. 
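# Illustrative example with assumed numbers: a host reporting 8 CPUs at
# 2400 MHz each yields float(2400 * 8) = 19200.0 here, which inspect_cpus()
# below uses as the denominator when normalizing a VM's consumed CPU clock.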
cpu_info = hostutils.get_cpus_info() host_cpu_count = len(cpu_info) host_cpu_clock = cpu_info[0]['MaxClockSpeed'] return float(host_cpu_clock * host_cpu_count) def inspect_cpus(self, instance): instance_name = util.instance_name(instance) (cpu_clock_used, cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name) cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock # Nanoseconds cpu_time = (int(uptime * cpu_percent_used) * units.k) return virt_inspector.CPUStats(number=cpu_count, time=cpu_time) def inspect_memory_usage(self, instance, duration=None): instance_name = util.instance_name(instance) usage = self._utils.get_memory_metrics(instance_name) return virt_inspector.MemoryUsageStats(usage=usage) def inspect_vnics(self, instance): instance_name = util.instance_name(instance) for vnic_metrics in self._utils.get_vnic_metrics(instance_name): interface = virt_inspector.Interface( name=vnic_metrics["element_name"], mac=vnic_metrics["address"], fref=None, parameters=None) stats = virt_inspector.InterfaceStats( rx_bytes=vnic_metrics['rx_mb'] * units.Mi, rx_packets=0, tx_bytes=vnic_metrics['tx_mb'] * units.Mi, tx_packets=0) yield (interface, stats) def inspect_disks(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_metrics(instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskStats( read_requests=0, # Return bytes read_bytes=disk_metrics['read_mb'] * units.Mi, write_requests=0, write_bytes=disk_metrics['write_mb'] * units.Mi, errors=0) yield (disk, stats) def inspect_disk_latency(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_latency_metrics( instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskLatencyStats( disk_latency=disk_metrics['disk_latency']) yield (disk, stats) def inspect_disk_iops(self, instance): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_iops_count(instance_name): disk = virt_inspector.Disk(device=disk_metrics['instance_id']) stats = virt_inspector.DiskIOPSStats( iops_count=disk_metrics['iops_count']) yield (disk, stats) ceilometer-6.0.0/ceilometer/compute/virt/hyperv/__init__.py0000664000567000056710000000000012701406223025207 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/compute/util.py0000664000567000056710000000456512701406223022150 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import six # Below config is for collecting metadata which user defined in nova or else, # and then storing it to Sample for future use according to user's requirement. # Such as using it as OpenTSDB tags for metrics. 
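# A hedged sketch of the behaviour implemented by
# add_reserved_user_metadata() below (keys and values are illustrative
# only): with the default reserved_metadata_namespace of ['metering.'],
# src_metadata such as {'metering.stack_id': 'a1', 'other.key': 'x'} adds
# user_metadata={'stack_id': 'a1'} to dest_metadata, while 'other.key' is
# ignored; string values are truncated to reserved_metadata_length
# (256 by default) and dots in key names are replaced with underscores.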
OPTS = [ cfg.ListOpt('reserved_metadata_namespace', default=['metering.'], help='List of metadata prefixes reserved for metering use.'), cfg.IntOpt('reserved_metadata_length', default=256, help='Limit on length of reserved metadata values.'), cfg.ListOpt('reserved_metadata_keys', default=[], help='List of metadata keys reserved for metering use. And ' 'these keys are additional to the ones included in the ' 'namespace.'), ] cfg.CONF.register_opts(OPTS) def add_reserved_user_metadata(src_metadata, dest_metadata): limit = cfg.CONF.reserved_metadata_length user_metadata = {} for prefix in cfg.CONF.reserved_metadata_namespace: md = dict( (k[len(prefix):].replace('.', '_'), v[:limit] if isinstance(v, six.string_types) else v) for k, v in src_metadata.items() if (k.startswith(prefix) and k[len(prefix):].replace('.', '_') not in dest_metadata) ) user_metadata.update(md) for metadata_key in cfg.CONF.reserved_metadata_keys: md = dict( (k.replace('.', '_'), v[:limit] if isinstance(v, six.string_types) else v) for k, v in src_metadata.items() if (k == metadata_key and k.replace('.', '_') not in dest_metadata) ) user_metadata.update(md) if user_metadata: dest_metadata['user_metadata'] = user_metadata return dest_metadata ceilometer-6.0.0/ceilometer/compute/discovery.py0000664000567000056710000000662212701406223023176 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer import nova_client OPTS = [ cfg.BoolOpt('workload_partitioning', default=False, help='Enable work-load partitioning, allowing multiple ' 'compute agents to be run simultaneously.'), cfg.IntOpt('resource_update_interval', default=0, min=0, help="New instances will be discovered periodically based" " on this option (in seconds). By default, " "the agent discovers instances according to pipeline " "polling interval. If option is greater than 0, " "the instance list to poll will be updated based " "on this option's interval. 
Measurements relating " "to the instances will match intervals " "defined in pipeline.") ] cfg.CONF.register_opts(OPTS, group='compute') class InstanceDiscovery(plugin_base.DiscoveryBase): def __init__(self): super(InstanceDiscovery, self).__init__() self.nova_cli = nova_client.Client() self.last_run = None self.instances = {} self.expiration_time = cfg.CONF.compute.resource_update_interval def discover(self, manager, param=None): """Discover resources to monitor.""" secs_from_last_update = 0 if self.last_run: secs_from_last_update = timeutils.delta_seconds( self.last_run, timeutils.utcnow(True)) instances = [] # NOTE(ityaptin) we make a nova request only if # it's the first discovery or resources expired if not self.last_run or secs_from_last_update >= self.expiration_time: try: utc_now = timeutils.utcnow(True) since = self.last_run.isoformat() if self.last_run else None instances = self.nova_cli.instance_get_all_by_host( cfg.CONF.host, since) self.last_run = utc_now except Exception: # NOTE(zqfan): instance_get_all_by_host is wrapped and will log # exception when there is any error. There is no need to raise it # again and print one more time. return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance return self.instances.values() @property def group_id(self): if cfg.CONF.compute.workload_partitioning: return cfg.CONF.host else: return None ceilometer-6.0.0/ceilometer/objectstore/0000775000567000056710000000000012701406364021464 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/objectstore/__init__.py0000664000567000056710000000000012701406223023555 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/objectstore/rgw_client.py0000664000567000056710000000474312701406223024175 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
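# The admin client below queries the Radosgw AdminOps API with S3-style
# signed requests (via S3Auth); a bucket-stats call has roughly this shape,
# with an illustrative endpoint:
#   GET http://rgw.example.com:8080/admin/bucket?uid=<tenant_id>&stats=true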
from collections import namedtuple from awsauth import S3Auth import requests import six.moves.urllib.parse as urlparse from ceilometer.i18n import _ class RGWAdminAPIFailed(Exception): pass class RGWAdminClient(object): Bucket = namedtuple('Bucket', 'name, num_objects, size') def __init__(self, endpoint, access_key, secret_key): self.access_key = access_key self.secret = secret_key self.endpoint = endpoint self.hostname = urlparse.urlparse(endpoint).netloc def _make_request(self, path, req_params): uri = "{0}/{1}".format(self.endpoint, path) r = requests.get(uri, params=req_params, auth=S3Auth(self.access_key, self.secret, self.hostname) ) if r.status_code != 200: raise RGWAdminAPIFailed( _('RGW AdminOps API returned %(status)s %(reason)s') % {'status': r.status_code, 'reason': r.reason}) return r.json() def get_bucket(self, tenant_id): path = "bucket" req_params = {"uid": tenant_id, "stats": "true"} json_data = self._make_request(path, req_params) stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} stats['num_buckets'] = len(json_data) for it in json_data: for k, v in it["usage"].items(): stats['num_objects'] += v["num_objects"] stats['size'] += v["size_kb"] stats['buckets'].append(self.Bucket(it["bucket"], v["num_objects"], v["size_kb"])) return stats def get_usage(self, tenant_id): path = "usage" req_params = {"uid": tenant_id} json_data = self._make_request(path, req_params) usage_data = json_data["summary"] return sum((it["total"]["ops"] for it in usage_data)) ceilometer-6.0.0/ceilometer/objectstore/rgw.py0000664000567000056710000001750112701406223022633 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
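# A minimal usage sketch of the RGWAdminClient defined above (endpoint and
# credentials are assumed, not real):
#
#   client = RGWAdminClient('http://rgw.example.com:8080/admin',
#                           'access_key', 'secret_key')
#   bucket_stats = client.get_bucket(tenant_id)
#   # -> dict with 'num_buckets', 'num_objects', 'size' (in KB) and a
#   #    'buckets' list of Bucket(name, num_objects, size) tuples
#   total_ops = client.get_usage(tenant_id)  # summed request count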
"""Common code for working with ceph object stores """ from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six.moves.urllib.parse as urlparse from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('radosgw', default='object-store', help='Radosgw service type.'), ] CREDENTIAL_OPTS = [ cfg.StrOpt('access_key', secret=True, help='Access key for Radosgw Admin.'), cfg.StrOpt('secret_key', secret=True, help='Secret key for Radosgw Admin.') ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.register_opts(CREDENTIAL_OPTS, group='rgw_admin_credentials') cfg.CONF.import_group('rgw_admin_credentials', 'ceilometer.service') class _Base(plugin_base.PollsterBase): METHOD = 'bucket' _ENDPOINT = None def __init__(self): self.access_key = cfg.CONF.rgw_admin_credentials.access_key self.secret = cfg.CONF.rgw_admin_credentials.secret_key @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'rgw.get_%s' % self.METHOD @staticmethod def _get_endpoint(ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once, also we assume that in a single deployment # we may be only deploying `radosgw` or `swift` as the object-store if _Base._ENDPOINT is None: try: conf = cfg.CONF.service_credentials rgw_url = keystone_client.get_service_catalog( ksclient).url_for( service_type=cfg.CONF.service_types.radosgw, interface=conf.interface) _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') except exceptions.EndpointNotFound: LOG.debug("Radosgw endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(ksclient) if not endpoint: raise StopIteration() try: from ceilometer.objectstore.rgw_client import RGWAdminClient rgw_client = RGWAdminClient(endpoint, self.access_key, self.secret) except ImportError: raise plugin_base.PollsterPermanentError(tenants) for t in tenants: api_method = 'get_%s' % self.METHOD yield t.id, getattr(rgw_client, api_method)(t.id) class ContainersObjectsPollster(_Base): """Get info about object counts in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects', type=sample.TYPE_GAUGE, volume=int(it.num_objects), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about object sizes in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(it.size * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all 
accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.size', type=sample.TYPE_GAUGE, volume=int(bucket_info['size'] * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_objects']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsContainersPollster(_Base): def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.containers', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_buckets']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class UsagePollster(_Base): METHOD = 'usage' def get_samples(self, manager, cache, resources): for tenant, usage in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.api.request', type=sample.TYPE_GAUGE, volume=int(usage), unit='request', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) ceilometer-6.0.0/ceilometer/objectstore/swift.py0000664000567000056710000001632212701406223023170 0ustar jenkinsjenkins00000000000000# # Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with object stores """ from __future__ import absolute_import from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six.moves.urllib.parse as urlparse from swiftclient import client as swift from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('reseller_prefix', default='AUTH_', help="Swift reseller prefix. 
Must be on par with " "reseller_prefix in proxy-server.conf."), ] SERVICE_OPTS = [ cfg.StrOpt('swift', default='object-store', help='Swift service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class _Base(plugin_base.PollsterBase): METHOD = 'head' _ENDPOINT = None @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'swift.%s_account' % self.METHOD @staticmethod def _get_endpoint(ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once if _Base._ENDPOINT is None: try: conf = cfg.CONF.service_credentials _Base._ENDPOINT = keystone_client.get_service_catalog( ksclient).url_for( service_type=cfg.CONF.service_types.swift, interface=conf.interface) except exceptions.EndpointNotFound: LOG.debug("Swift endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(ksclient) if not endpoint: raise StopIteration() for t in tenants: api_method = '%s_account' % self.METHOD yield (t.id, getattr(swift, api_method) (self._neaten_url(endpoint, t.id), keystone_client.get_auth_token(ksclient))) @staticmethod def _neaten_url(endpoint, tenant_id): """Transform the registered url to standard and valid format.""" return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', 'v1/' + cfg.CONF.reseller_prefix + tenant_id) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects', type=sample.TYPE_GAUGE, volume=int(account['x-account-object-count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.size', type=sample.TYPE_GAUGE, volume=int(account['x-account-bytes-used']), unit='B', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ObjectsContainersPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.containers', type=sample.TYPE_GAUGE, volume=int(account['x-account-container-count']), unit='container', user_id=None, project_id=tenant, resource_id=tenant, timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersObjectsPollster(_Base): """Get info about containers using Swift API.""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects', 
type=sample.TYPE_GAUGE, volume=int(container['count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about containers using Swift API.""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(container['bytes']), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=None, ) ceilometer-6.0.0/ceilometer/hacking/0000775000567000056710000000000012701406364020545 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hacking/__init__.py0000664000567000056710000000000012701406223022636 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hacking/checks.py0000664000567000056710000000330412701406223022351 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Ceilometer specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range X3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file """ def no_log_warn(logical_line): """Disallow 'LOG.warn(' https://bugs.launchpad.net/tempest/+bug/1508442 C301 """ if logical_line.startswith('LOG.warn('): yield(0, 'C301 Use LOG.warning() rather than LOG.warn()') def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 C302 """ if 'os.popen(' in logical_line: yield(0, 'C302 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') def factory(register): register(no_log_warn) register(no_os_popen) ceilometer-6.0.0/ceilometer/agent/0000775000567000056710000000000012701406364020237 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/agent/plugin_base.py0000664000567000056710000002257312701406224023105 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Base class for plugins. """ import abc import collections from oslo_context import context from oslo_log import log import oslo_messaging import six from stevedore import extension from ceilometer.i18n import _LE from ceilometer import messaging LOG = log.getLogger(__name__) ExchangeTopics = collections.namedtuple('ExchangeTopics', ['exchange', 'topics']) class PluginBase(object): """Base class for all plugins.""" @six.add_metaclass(abc.ABCMeta) class NotificationBase(PluginBase): """Base class for plugins that support the notification API.""" def __init__(self, manager): super(NotificationBase, self).__init__() # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch # messages to an endpoint. if self.event_types: self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) self.manager = manager @staticmethod def get_notification_topics(conf): if 'notification_topics' in conf: return conf.notification_topics return conf.oslo_messaging_notifications.topics @abc.abstractproperty def event_types(self): """Return a sequence of strings. Strings are defining the event types to be given to this plugin. """ @abc.abstractmethod def get_targets(self, conf): """Return a sequence of oslo.messaging.Target. Sequence is defining the exchange and topics to be connected for this plugin. :param conf: Configuration. """ @abc.abstractmethod def process_notification(self, message): """Return a sequence of Counter instances for the given message. :param message: Message to process. """ def info(self, notifications): """RPC endpoint for notification messages at info level When another service sends a notification over the message bus, this method receives it. :param notifications: list of notifications """ self._process_notifications('info', notifications) def sample(self, notifications): """RPC endpoint for notification messages at sample level When another service sends a notification over the message bus at sample priority, this method receives it. :param notifications: list of notifications """ self._process_notifications('sample', notifications) def _process_notifications(self, priority, notifications): for notification in notifications: try: notification = messaging.convert_to_old_notification_format( priority, notification) self.to_samples_and_publish(context.get_admin_context(), notification) except Exception: LOG.error(_LE('Fail to process notification'), exc_info=True) def to_samples_and_publish(self, context, notification): """Return samples produced by *process_notification*. Samples produced for the given notification. :param context: Execution context from the service or RPC call :param notification: The notification to process. """ with self.manager.publisher(context) as p: p(list(self.process_notification(notification))) class NonMetricNotificationBase(object): """Use to mark non-measurement meters There are a number of historical non-measurement meters that should really be captured as events. This common base allows us to disable these invalid meters. """ pass class ExtensionLoadError(Exception): """Error of loading pollster plugin. PollsterBase provides a hook, setup_environment, called in pollster loading to setup required HW/SW dependency. Any exception from it would be propagated as ExtensionLoadError, then skip loading this pollster. """ pass class PollsterPermanentError(Exception): """Permanent error when polling. 
When unrecoverable error happened in polling, pollster can raise this exception with failed resource to prevent itself from polling any more. Resource is one of parameter resources from get_samples that cause polling error. """ def __init__(self, resources): self.fail_res_list = resources @six.add_metaclass(abc.ABCMeta) class PollsterBase(PluginBase): """Base class for plugins that support the polling API.""" def setup_environment(self): """Setup required environment for pollster. Each subclass could overwrite it for specific usage. Any exception raised in this function would prevent pollster being loaded. """ pass def __init__(self): super(PollsterBase, self).__init__() try: self.setup_environment() except Exception as err: raise ExtensionLoadError(err) @abc.abstractproperty def default_discovery(self): """Default discovery to use for this pollster. There are three ways a pollster can get a list of resources to poll, listed here in ascending order of precedence: 1. from the per-agent discovery, 2. from the per-pollster discovery (defined here) 3. from the per-pipeline configured discovery and/or per-pipeline configured static resources. If a pollster should only get resources from #1 or #3, this property should be set to None. """ @abc.abstractmethod def get_samples(self, manager, cache, resources): """Return a sequence of Counter instances from polling the resources. :param manager: The service manager class invoking the plugin. :param cache: A dictionary to allow pollsters to pass data between themselves when recomputing it would be expensive (e.g., asking another service for a list of objects). :param resources: A list of resources the pollster will get data from. It's up to the specific pollster to decide how to use it. It is usually supplied by a discovery, see ``default_discovery`` for more information. """ @classmethod def build_pollsters(cls): """Return a list of tuple (name, pollster). The name is the meter name which the pollster would return, the pollster is a pollster object instance. The pollster which implements this method should be registered in the namespace of ceilometer.builder.xxx instead of ceilometer.poll.xxx. """ return [] @classmethod def get_pollsters_extensions(cls): """Return a list of stevedore extensions. The returned stevedore extensions wrap the pollster object instances returned by build_pollsters. """ extensions = [] try: for name, pollster in cls.build_pollsters(): ext = extension.Extension(name, None, cls, pollster) extensions.append(ext) except Exception as err: raise ExtensionLoadError(err) return extensions @six.add_metaclass(abc.ABCMeta) class DiscoveryBase(object): KEYSTONE_REQUIRED_FOR_SERVICE = None """Service type required in keystone catalog to works""" @abc.abstractmethod def discover(self, manager, param=None): """Discover resources to monitor. The most fine-grained discovery should be preferred, so the work is the most evenly distributed among multiple agents (if they exist). For example: if the pollster can separately poll individual resources, it should have its own discovery implementation to discover those resources. If it can only poll per-tenant, then the `TenantDiscovery` should be used. If even that is not possible, use `EndpointDiscovery` (see their respective docstrings). :param manager: The service manager class invoking the plugin. :param param: an optional parameter to guide the discovery """ @property def group_id(self): """Return group id of this discovery. 
All running discoveries with the same group_id should return the same set of resources at a given point in time. By default, a discovery is put into a global group, meaning that all discoveries of its type running anywhere in the cloud return the same set of resources. This property can be overridden to provide correct grouping of localized discoveries. For example, compute discovery is localized to a host, which is reflected in its group_id. A None value signifies that this discovery does not want to be part of workload partitioning at all. """ return 'global' ceilometer-6.0.0/ceilometer/agent/__init__.py0000664000567000056710000000000012701406223022330 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/agent/discovery/0000775000567000056710000000000012701406364022246 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/agent/discovery/__init__.py0000664000567000056710000000000012701406223024339 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/agent/discovery/endpoint.py0000664000567000056710000000341712701406223024437 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from ceilometer.agent import plugin_base as plugin from ceilometer.i18n import _LW from ceilometer import keystone_client LOG = log.getLogger(__name__) cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class EndpointDiscovery(plugin.DiscoveryBase): """Discovery that supplies service endpoints. This discovery should be used when the relevant APIs are not well suited to dividing the pollster's work into smaller pieces than a whole service at once. An example of this is the floating_ip pollster, which calls nova.floating_ips.list() and therefore gets all floating IPs at once. """ @staticmethod def discover(manager, param=None): endpoints = keystone_client.get_service_catalog( manager.keystone).get_urls( service_type=param, interface=cfg.CONF.service_credentials.interface, region_name=cfg.CONF.service_credentials.region_name) if not endpoints: LOG.warning(_LW('No endpoints found for service %s'), "" if param is None else param) return [] return endpoints ceilometer-6.0.0/ceilometer/agent/discovery/tenant.py0000664000567000056710000000223312701406223024103 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
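# NOTE: a discovery plugin such as this one is exposed through the
# 'ceilometer.discover' entry-point namespace and is referenced from the
# polling/pipeline configuration by entry-point name, optionally with a
# parameter. An illustrative source definition (the meter name and the
# 'tenant' discoverer name are assumptions of this sketch):
#
#     sources:
#         - name: tenant_source
#           meters: ['storage.objects']
#           discovery: ['tenant']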
from oslo_config import cfg from ceilometer.agent import plugin_base as plugin cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') class TenantDiscovery(plugin.DiscoveryBase): """Discovery that supplies keystone tenants. This discovery should be used when the pollster's work can't be divided into smaller pieces than per-tenants. Example of this is the Swift pollster, which polls account details and does so per-project. """ def discover(self, manager, param=None): tenants = manager.keystone.projects.list() return tenants or [] ceilometer-6.0.0/ceilometer/agent/discovery/localnode.py0000664000567000056710000000142312701406223024552 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.agent import plugin_base class LocalNodeDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): """Return local node as resource.""" return ['local_host'] ceilometer-6.0.0/ceilometer/agent/manager.py0000664000567000056710000004776512701406224022241 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Julien Danjou # Copyright 2014 Red Hat, Inc # # Authors: Julien Danjou # Eoghan Glynn # Nejc Saje # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import random from keystoneauth1 import exceptions as ka_exceptions from keystoneclient import exceptions as ks_exceptions from oslo_config import cfg from oslo_context import context from oslo_log import log import oslo_messaging from six import moves from six.moves.urllib import parse as urlparse from stevedore import extension from ceilometer.agent import plugin_base from ceilometer import coordination from ceilometer.i18n import _, _LE, _LI, _LW from ceilometer import keystone_client from ceilometer import messaging from ceilometer import pipeline from ceilometer.publisher import utils as publisher_utils from ceilometer import service_base from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.BoolOpt('batch_polled_samples', default=True, help='To reduce polling agent load, samples are sent to the ' 'notification agent in a batch. To gain higher ' 'throughput at the cost of load set this to False.'), cfg.IntOpt('shuffle_time_before_polling_task', default=0, help='To reduce large requests at same time to Nova or other ' 'components from different compute agents, shuffle ' 'start time of polling task.'), ] POLLING_OPTS = [ cfg.StrOpt('partitioning_group_prefix', deprecated_group='central', help='Work-load partitioning group prefix. 
Use only if you ' 'want to run multiple polling agents with different ' 'config files. For each sub-group of the agent ' 'pool with the same partitioning_group_prefix a disjoint ' 'subset of pollsters should be loaded.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(POLLING_OPTS, group='polling') cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') cfg.CONF.import_group('service_types', 'ceilometer.energy.kwapi') cfg.CONF.import_group('service_types', 'ceilometer.image.glance') cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') cfg.CONF.import_group('service_types', 'ceilometer.nova_client') cfg.CONF.import_group('service_types', 'ceilometer.objectstore.rgw') cfg.CONF.import_group('service_types', 'ceilometer.objectstore.swift') class PollsterListForbidden(Exception): def __init__(self): msg = ('It is forbidden to use pollster-list option of polling agent ' 'in case of using coordination between multiple agents. Please ' 'use either multiple agents being coordinated or polling list ' 'option for one polling agent.') super(PollsterListForbidden, self).__init__(msg) class Resources(object): def __init__(self, agent_manager): self.agent_manager = agent_manager self._resources = [] self._discovery = [] self.blacklist = [] self.last_dup = [] def setup(self, source): self._resources = source.resources self._discovery = source.discovery def get(self, discovery_cache=None): source_discovery = (self.agent_manager.discover(self._discovery, discovery_cache) if self._discovery else []) static_resources = [] if self._resources: static_resources_group = self.agent_manager.construct_group_id( utils.hash_of_set(self._resources)) p_coord = self.agent_manager.partition_coordinator static_resources = p_coord.extract_my_subset( static_resources_group, self._resources) return static_resources + source_discovery @staticmethod def key(source_name, pollster): return '%s-%s' % (source_name, pollster.name) class PollingTask(object): """Polling task for polling samples and notifying. A polling task can be invoked periodically or only once. """ def __init__(self, agent_manager): self.manager = agent_manager # elements of the Cartesian product of sources X pollsters # with a common interval self.pollster_matches = collections.defaultdict(set) # we relate the static resources and per-source discovery to # each combination of pollster and matching source resource_factory = lambda: Resources(agent_manager) self.resources = collections.defaultdict(resource_factory) self._batch = cfg.CONF.batch_polled_samples self._telemetry_secret = cfg.CONF.publisher.telemetry_secret def add(self, pollster, source): self.pollster_matches[source.name].add(pollster) key = Resources.key(source.name, pollster) self.resources[key].setup(source) def poll_and_notify(self): """Polling sample and notify.""" cache = {} discovery_cache = {} poll_history = {} for source_name in self.pollster_matches: for pollster in self.pollster_matches[source_name]: key = Resources.key(source_name, pollster) candidate_res = list( self.resources[key].get(discovery_cache)) if not candidate_res and pollster.obj.default_discovery: candidate_res = self.manager.discover( [pollster.obj.default_discovery], discovery_cache) # Remove duplicated resources and black resources. Using # set() requires well defined __hash__ for each resource. # Since __eq__ is defined, 'not in' is safe here. 
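                # NOTE: each candidate resource is considered at most once
                # per polling cycle (tracked in poll_history), and resources
                # that previously caused a PollsterPermanentError are skipped
                # via the per-key blacklist below.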
polling_resources = [] black_res = self.resources[key].blacklist history = poll_history.get(pollster.name, []) for x in candidate_res: if x not in history: history.append(x) if x not in black_res: polling_resources.append(x) poll_history[pollster.name] = history # If there are no resources, skip this pollster if not polling_resources: p_context = 'new ' if history else '' LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s" "resources found this cycle"), {'name': pollster.name, 'p_context': p_context}) continue LOG.info(_LI("Polling pollster %(poll)s in the context of " "%(src)s"), dict(poll=pollster.name, src=source_name)) try: samples = pollster.obj.get_samples( manager=self.manager, cache=cache, resources=polling_resources ) sample_batch = [] for sample in samples: sample_dict = ( publisher_utils.meter_message_from_counter( sample, self._telemetry_secret )) if self._batch: sample_batch.append(sample_dict) else: self._send_notification([sample_dict]) if sample_batch: self._send_notification(sample_batch) except plugin_base.PollsterPermanentError as err: LOG.error(_( 'Preventing pollster %(name)s from ' 'polling source %(source)s anymore!') % ({'name': pollster.name, 'source': source_name})) self.resources[key].blacklist.extend(err.fail_res_list) except Exception as err: LOG.warning(_( 'Continue after error from %(name)s: %(error)s') % ({'name': pollster.name, 'error': err}), exc_info=True) def _send_notification(self, samples): self.manager.notifier.sample( self.manager.context.to_dict(), 'telemetry.polling', {'samples': samples} ) class AgentManager(service_base.BaseService): def __init__(self, namespaces=None, pollster_list=None): namespaces = namespaces or ['compute', 'central'] pollster_list = pollster_list or [] group_prefix = cfg.CONF.polling.partitioning_group_prefix # the coordination and pollster-list features are mutually exclusive # and cannot be used at the same time, to avoid both sample # duplication and samples being lost if pollster_list and cfg.CONF.coordination.backend_url: raise PollsterListForbidden() super(AgentManager, self).__init__() def _match(pollster): """Find out if the pollster name matches one of the list.""" return any(utils.match(pollster.name, pattern) for pattern in pollster_list) if type(namespaces) is not list: namespaces = [namespaces] # we'll have the default ['compute', 'central'] here if no namespaces # were passed extensions = (self._extensions('poll', namespace).extensions for namespace in namespaces) # get the extensions from pollster builder extensions_fb = (self._extensions_from_builder('poll', namespace) for namespace in namespaces) if pollster_list: extensions = (moves.filter(_match, exts) for exts in extensions) extensions_fb = (moves.filter(_match, exts) for exts in extensions_fb) self.extensions = list(itertools.chain(*list(extensions))) + list( itertools.chain(*list(extensions_fb))) self.discovery_manager = self._extensions('discover') self.context = context.RequestContext('admin', 'admin', is_admin=True) self.partition_coordinator = coordination.PartitionCoordinator() # Compose coordination group prefix. # We'll use namespaces as the basis for this partitioning.
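        # e.g. (illustrative values) namespaces of ['compute', 'central']
        # with a partitioning_group_prefix of 'rack1' yield the group prefix
        # 'central-compute-rack1'; with no prefix configured it is simply
        # 'central-compute'.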
namespace_prefix = '-'.join(sorted(namespaces)) self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix) if group_prefix else namespace_prefix) self.notifier = oslo_messaging.Notifier( messaging.get_transport(), driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id="ceilometer.polling") self._keystone = None self._keystone_last_exception = None @staticmethod def _get_ext_mgr(namespace): def _catch_extension_load_error(mgr, ep, exc): # Extension raising ExtensionLoadError can be ignored, # and ignore anything we can't import as a safety measure. if isinstance(exc, plugin_base.ExtensionLoadError): LOG.exception(_("Skip loading extension for %s") % ep.name) return if isinstance(exc, ImportError): LOG.error(_("Failed to import extension for %(name)s: " "%(error)s"), {'name': ep.name, 'error': exc}) return raise exc return extension.ExtensionManager( namespace=namespace, invoke_on_load=True, on_load_failure_callback=_catch_extension_load_error, ) def _extensions(self, category, agent_ns=None): namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.%s' % category) return self._get_ext_mgr(namespace) def _extensions_from_builder(self, category, agent_ns=None): ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.builder.%s' % category) mgr = self._get_ext_mgr(ns) def _build(ext): return ext.plugin.get_pollsters_extensions() # NOTE: this seems a stevedore bug. if no extensions are found, # map will raise runtimeError which is not documented. if mgr.names(): return list(itertools.chain(*mgr.map(_build))) else: return [] def join_partitioning_groups(self): self.groups = set([self.construct_group_id(d.obj.group_id) for d in self.discovery_manager]) # let each set of statically-defined resources have its own group static_resource_groups = set([ self.construct_group_id(utils.hash_of_set(p.resources)) for p in self.polling_manager.sources if p.resources ]) self.groups.update(static_resource_groups) for group in self.groups: self.partition_coordinator.join_group(group) def create_polling_task(self): """Create an initially empty polling task.""" return PollingTask(self) def setup_polling_tasks(self): polling_tasks = {} for source in self.polling_manager.sources: polling_task = None for pollster in self.extensions: if source.support_meter(pollster.name): polling_task = polling_tasks.get(source.get_interval()) if not polling_task: polling_task = self.create_polling_task() polling_tasks[source.get_interval()] = polling_task polling_task.add(pollster, source) return polling_tasks def construct_group_id(self, discovery_group_id): return ('%s-%s' % (self.group_prefix, discovery_group_id) if discovery_group_id else None) def configure_polling_tasks(self): # allow time for coordination if necessary delay_start = self.partition_coordinator.is_active() # set shuffle time before polling task if necessary delay_polling_time = random.randint( 0, cfg.CONF.shuffle_time_before_polling_task) pollster_timers = [] data = self.setup_polling_tasks() for interval, polling_task in data.items(): delay_time = (interval + delay_polling_time if delay_start else delay_polling_time) pollster_timers.append(self.tg.add_timer(interval, self.interval_task, initial_delay=delay_time, task=polling_task)) self.tg.add_timer(cfg.CONF.coordination.heartbeat, self.partition_coordinator.heartbeat) return pollster_timers def start(self): self.polling_manager = pipeline.setup_polling() self.partition_coordinator.start() self.join_partitioning_groups() 
self.pollster_timers = self.configure_polling_tasks() self.init_pipeline_refresh() def stop(self): if self.partition_coordinator: self.partition_coordinator.stop() super(AgentManager, self).stop() def interval_task(self, task): # NOTE(sileht): remove the previous keystone client # and exception to get a new one in this polling cycle. self._keystone = None self._keystone_last_exception = None task.poll_and_notify() @property def keystone(self): # NOTE(sileht): we do lazy loading of the keystone client # for multiple reasons: # * don't use it if no plugin need it # * use only one client for all plugins per polling cycle if self._keystone is None and self._keystone_last_exception is None: try: self._keystone = keystone_client.get_client() self._keystone_last_exception = None except (ka_exceptions.ClientException, ks_exceptions.ClientException) as e: self._keystone = None self._keystone_last_exception = e if self._keystone is not None: return self._keystone else: raise self._keystone_last_exception @staticmethod def _parse_discoverer(url): s = urlparse.urlparse(url) return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) def _discoverer(self, name): for d in self.discovery_manager: if d.name == name: return d.obj return None def discover(self, discovery=None, discovery_cache=None): resources = [] discovery = discovery or [] for url in discovery: if discovery_cache is not None and url in discovery_cache: resources.extend(discovery_cache[url]) continue name, param = self._parse_discoverer(url) discoverer = self._discoverer(name) if discoverer: try: if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: service_type = getattr( cfg.CONF.service_types, discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) if not keystone_client.get_service_catalog( self.keystone).get_endpoints( service_type=service_type): LOG.warning(_LW( 'Skipping %(name)s, %(service_type)s service ' 'is not registered in keystone'), {'name': name, 'service_type': service_type}) continue discovered = discoverer.discover(self, param) partitioned = self.partition_coordinator.extract_my_subset( self.construct_group_id(discoverer.group_id), discovered) resources.extend(partitioned) if discovery_cache is not None: discovery_cache[url] = partitioned except (ka_exceptions.ClientException, ks_exceptions.ClientException) as e: LOG.error(_LE('Skipping %(name)s, keystone issue: ' '%(exc)s'), {'name': name, 'exc': e}) except Exception as err: LOG.exception(_('Unable to discover resources: %s') % err) else: LOG.warning(_('Unknown discovery extension: %s') % name) return resources def stop_pollsters(self): for x in self.pollster_timers: try: x.stop() self.tg.timer_done(x) except Exception: LOG.error(_('Error stopping pollster.'), exc_info=True) self.pollster_timers = [] def reload_pipeline(self): if self.pipeline_validated: LOG.info(_LI("Reconfiguring polling tasks.")) # stop existing pollsters and leave partitioning groups self.stop_pollsters() for group in self.groups: self.partition_coordinator.leave_group(group) # re-create partitioning groups according to pipeline # and configure polling tasks with latest pipeline conf self.join_partitioning_groups() self.pollster_timers = self.configure_polling_tasks() ceilometer-6.0.0/ceilometer/service.py0000664000567000056710000000655712701406223021162 0ustar jenkinsjenkins00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import sys from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from ceilometer.conf import defaults from ceilometer import keystone_client from ceilometer import messaging from ceilometer import version OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node, which must be valid in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'), ] cfg.CONF.register_opts(OPTS) API_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='api_workers', help='Number of workers for api, default value is 1.') cfg.CONF.register_opt(API_OPT, 'api') NOTI_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='notification_workers', help='Number of workers for notification service, ' 'default value is 1.') cfg.CONF.register_opt(NOTI_OPT, 'notification') COLL_OPT = cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='collector_workers', help='Number of workers for collector service. ' 'default value is 1.') cfg.CONF.register_opt(COLL_OPT, 'collector') keystone_client.register_keystoneauth_opts(cfg.CONF) def prepare_service(argv=None, config_files=None): oslo_i18n.enable_lazy() log.register_options(cfg.CONF) log_levels = (cfg.CONF.default_log_levels + ['stevedore=INFO', 'keystoneclient=INFO', 'neutronclient=INFO']) log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() if argv is None: argv = sys.argv cfg.CONF(argv[1:], project='ceilometer', validate_default_values=True, version=version.version_info.version_string(), default_config_files=config_files) keystone_client.setup_keystoneauth(cfg.CONF) log.setup(cfg.CONF, 'ceilometer') # NOTE(liusheng): guru cannot run with service under apache daemon, so when # ceilometer-api running with mod_wsgi, the argv is [], we don't start # guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) messaging.setup() ceilometer-6.0.0/ceilometer/image/0000775000567000056710000000000012701406364020223 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/image/glance.py0000664000567000056710000001101712701406224022021 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common code for working with images """ from __future__ import absolute_import import glanceclient from oslo_config import cfg from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample OPTS = [ cfg.IntOpt('glance_page_size', default=0, help="Number of items to request in " "each paginated Glance API request " "(parameter used by glancecelient). " "If this is less than or equal to 0, " "page size is not specified " "(default value in glanceclient is used)."), ] SERVICE_OPTS = [ cfg.StrOpt('glance', default='image', help='Glance service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') class _Base(plugin_base.PollsterBase): @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.glance @staticmethod def get_glance_client(ksclient, endpoint): # hard-code v1 glance API version selection while v2 API matures return glanceclient.Client('1', session=keystone_client.get_session(), endpoint=endpoint, auth=ksclient.session.auth) def _get_images(self, ksclient, endpoint): client = self.get_glance_client(ksclient, endpoint) page_size = cfg.CONF.glance_page_size kwargs = {} if page_size > 0: kwargs['page_size'] = page_size return client.images.list(filters={"is_public": None}, **kwargs) def _iter_images(self, ksclient, cache, endpoint): """Iterate over all images.""" key = '%s-images' % endpoint if key not in cache: cache[key] = list(self._get_images(ksclient, endpoint)) return iter(cache[key]) @staticmethod def extract_image_metadata(image): return dict((k, getattr(image, k)) for k in [ "status", "is_public", "name", "deleted", "container_format", "created_at", "disk_format", "updated_at", "properties", "min_disk", "protected", "checksum", "deleted_at", "min_ram", "size", ]) class ImagePollster(_Base): def get_samples(self, manager, cache, resources): for endpoint in resources: for image in self._iter_images(manager.keystone, cache, endpoint): yield sample.Sample( name='image', type=sample.TYPE_GAUGE, unit='image', volume=1, user_id=None, project_id=image.owner, resource_id=image.id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_image_metadata(image), ) class ImageSizePollster(_Base): def get_samples(self, manager, cache, resources): for endpoint in resources: for image in self._iter_images(manager.keystone, cache, endpoint): yield sample.Sample( name='image.size', type=sample.TYPE_GAUGE, unit='B', volume=image.size, user_id=None, project_id=image.owner, resource_id=image.id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=self.extract_image_metadata(image), ) ceilometer-6.0.0/ceilometer/image/__init__.py0000664000567000056710000000000012701406223022314 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/sample.py0000664000567000056710000000713712701406223020776 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Authors: Doug Hellmann # Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Sample class for holding data about a metering event. A Sample doesn't really do anything, but we need a way to ensure that all of the appropriate fields have been filled in by the plugins that create them. """ import copy import uuid from oslo_config import cfg OPTS = [ cfg.StrOpt('sample_source', default='openstack', help='Source for samples emitted on this instance.'), ] cfg.CONF.register_opts(OPTS) # Fields explanation: # # Source: the source of this sample # Name: the name of the meter, must be unique # Type: the type of the meter, must be either: # - cumulative: the value is incremented and never reset to 0 # - delta: the value is reset to 0 each time it is sent # - gauge: the value is an absolute value and is not a counter # Unit: the unit of the meter # Volume: the sample value # User ID: the user ID # Project ID: the project ID # Resource ID: the resource ID # Timestamp: when the sample has been read # Resource metadata: various metadata # id: a UUID of the sample; it can be supplied by the caller when posting a sample via the API class Sample(object): def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp, resource_metadata, source=None, id=None): self.name = name self.type = type self.unit = unit self.volume = volume self.user_id = user_id self.project_id = project_id self.resource_id = resource_id self.timestamp = timestamp self.resource_metadata = resource_metadata self.source = source or cfg.CONF.sample_source self.id = id or str(uuid.uuid1()) def as_dict(self): return copy.copy(self.__dict__) def __repr__(self): return '<name: %s, volume: %s, resource_id: %s, timestamp: %s>' % ( self.name, self.volume, self.resource_id, self.timestamp) @classmethod def from_notification(cls, name, type, volume, unit, user_id, project_id, resource_id, message, timestamp=None, metadata=None, source=None): if not metadata: metadata = (copy.copy(message['payload']) if isinstance(message['payload'], dict) else {}) metadata['event_type'] = message['event_type'] metadata['host'] = message['publisher_id'] ts = timestamp if timestamp else message['timestamp'] return cls(name=name, type=type, volume=volume, unit=unit, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=ts, resource_metadata=metadata, source=source) TYPE_GAUGE = 'gauge' TYPE_DELTA = 'delta' TYPE_CUMULATIVE = 'cumulative' TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) ceilometer-6.0.0/ceilometer/telemetry/0000775000567000056710000000000012701406364021153 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/telemetry/__init__.py0000664000567000056710000000000012701406223023244 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/telemetry/notifications.py0000664000567000056710000000437212701406223024376 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
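# NOTE: the TelemetryIpc endpoint below consumes notifications whose
# payload carries a batch of sample dicts. An illustrative (abridged)
# payload, using the keys read in process_notification, looks like:
#
#     {'samples': [{'counter_name': 'image', 'counter_type': 'gauge',
#                   'counter_unit': 'image', 'counter_volume': 1,
#                   'message_id': '...'}]}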
from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('ceilometer_control_exchange', default='ceilometer', help="Exchange name for ceilometer notifications."), ] cfg.CONF.register_opts(OPTS) class TelemetryBase(plugin_base.NotificationBase): """Convert telemetry notification into Samples.""" def get_targets(self, conf): """Return a sequence of oslo_messaging.Target Sequence defining the exchange and topics to be connected for this plugin. """ return [oslo_messaging.Target( topic=topic, exchange=conf.ceilometer_control_exchange) for topic in self.get_notification_topics(conf)] class TelemetryIpc(TelemetryBase): """Handle sample from notification bus Telemetry samples can be posted via API or polled by Polling agent. """ event_types = ['telemetry.api', 'telemetry.polling'] def process_notification(self, message): samples = message['payload']['samples'] for sample_dict in samples: yield sample.Sample( name=sample_dict['counter_name'], type=sample_dict['counter_type'], unit=sample_dict['counter_unit'], volume=sample_dict['counter_volume'], user_id=sample_dict['user_id'], project_id=sample_dict['project_id'], resource_id=sample_dict['resource_id'], timestamp=sample_dict['timestamp'], resource_metadata=sample_dict['resource_metadata'], source=sample_dict['source'], id=sample_dict['message_id']) ceilometer-6.0.0/ceilometer/neutron_client.py0000664000567000056710000003671612701406224022553 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from neutronclient.common import exceptions from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client SERVICE_OPTS = [ cfg.StrOpt('neutron', default='network', help='Neutron service type.'), cfg.StrOpt('neutron_lbaas_version', default='v2', choices=('v1', 'v2'), help='Neutron load balancer version.') ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except exceptions.NeutronClientException as e: if e.status_code == 404: LOG.warning("The resource could not be found.") else: LOG.warning(e) return [] except Exception as e: LOG.exception(e) raise return with_logging class Client(object): """A client which gets information via python-neutronclient.""" def __init__(self): conf = cfg.CONF.service_credentials params = { 'session': keystone_client.get_session(), 'endpoint_type': conf.interface, 'region_name': conf.region_name, 'service_type': cfg.CONF.service_types.neutron, } self.client = clientv20.Client(**params) self.lb_version = cfg.CONF.service_types.neutron_lbaas_version @logged def port_get_all(self): resp = self.client.list_ports() return resp.get('ports') @logged def vip_get_all(self): resp = self.client.list_vips() return resp.get('vips') @logged def pool_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_pools() resources = resp.get('pools') elif self.lb_version == 'v2': resources = self.list_pools_v2() return resources @logged def member_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_members() resources = resp.get('members') elif self.lb_version == 'v2': resources = self.list_members_v2() return resources @logged def health_monitor_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_health_monitors() resources = resp.get('health_monitors') elif self.lb_version == 'v2': resources = self.list_health_monitors_v2() return resources @logged def pool_stats(self, pool): return self.client.retrieve_pool_stats(pool) @logged def vpn_get_all(self): resp = self.client.list_vpnservices() return resp.get('vpnservices') @logged def ipsec_site_connections_get_all(self): resp = self.client.list_ipsec_site_connections() return resp.get('ipsec_site_connections') @logged def firewall_get_all(self): resp = self.client.list_firewalls() return resp.get('firewalls') @logged def fw_policy_get_all(self): resp = self.client.list_firewall_policies() return resp.get('firewall_policies') @logged def fip_get_all(self): fips = self.client.list_floatingips()['floatingips'] return fips def list_pools_v2(self): """This method is used to get the pools list. This method uses Load Balancer v2_0 API to achieve the detailed list of the pools. 
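        Because the list call itself does not carry an operating status, the
        status of each pool is resolved from its owning load balancer's
        status tree (see _get_pool_status) and merged into the returned pool
        dicts.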
:returns: The list of the pool resources """ pool_status = dict() resp = self.client.list_lbaas_pools() temp_pools = resp.get('pools') resources = [] pool_listener_dict = self._get_pool_and_listener_ids(temp_pools) for k, v in pool_listener_dict.items(): loadbalancer_id = self._get_loadbalancer_id_with_listener_id(v) status = self._get_pool_status(loadbalancer_id, v) pool_status.update(status) for pool in temp_pools: pool_id = pool.get('id') pool['status'] = pool_status[pool_id] pool['lb_method'] = pool.get('lb_algorithm') pool['status_description'] = pool['status'] # Based on the LBaaSv2 design, the properties 'vip_id' # and 'subnet_id' should belong to the loadbalancer resource and # not to the pool resource. However, because we don't want to # change the metadata of the pool resource this release, # we set them to empty values manually. pool['provider'] = '' pool['vip_id'] = '' pool['subnet_id'] = '' resources.append(pool) return resources def list_members_v2(self): """Method used to list the members' info. This method is used to get the detailed list of the members with the Load Balancer v2_0 API :returns: The list of the member resources """ resources = [] pools = self.client.list_lbaas_pools().get('pools') for pool in pools: pool_id = pool.get('id') listener_id = pool.get('listeners')[0].get('id') lb_id = self._get_loadbalancer_id_with_listener_id(listener_id) status = self._get_member_status(lb_id, [listener_id, pool_id]) resp = self.client.list_lbaas_members(pool_id) temp_members = resp.get('members') for member in temp_members: member['status'] = status[member.get('id')] member['pool_id'] = pool_id member['status_description'] = member['status'] resources.append(member) return resources def list_health_monitors_v2(self): """Method used to list the health monitors. This method is used to get the detailed list of the health monitors with the Load Balancer v2_0 API :returns: The list of the health monitor resources """ resp = self.client.list_lbaas_healthmonitors() resources = resp.get('healthmonitors') return resources def _get_pool_and_listener_ids(self, pools): """Method used to get the mapping between pools and listeners. This method is used to get the pool ids and listener ids from the pool list. :param pools: The list of the pools :returns: The relationship between pools and listeners. It's a dictionary type. The key of this dict is the id of a pool and the value is the id of the first listener which the pool belongs to """ pool_listener_dict = dict() for pool in pools: key = pool.get("id") value = pool.get('listeners')[0].get('id') pool_listener_dict[key] = value return pool_listener_dict def _retrieve_loadbalancer_status_tree(self, loadbalancer_id): """Method used to get the status tree of a LB. This method is used to get the status tree of a specific Load Balancer. :param loadbalancer_id: The ID of the specific Load Balancer. :returns: The status of the specific Load Balancer. It consists of the load balancer and all of its children's provisioning and operating statuses """ lb_status_tree = self.client.retrieve_loadbalancer_status( loadbalancer_id) return lb_status_tree def _get_loadbalancer_id_with_listener_id(self, listener_id): """This method is used to get the loadbalancer id.
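        Only the first entry of the listener's 'loadbalancers' list is
        consulted; the lookup assumes a listener is attached to a single
        load balancer.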
:param listener_id: The ID of the listener :returns: The ID of the Loadbalancer """ listener = self.client.show_listener(listener_id) listener_lbs = listener.get('listener').get('loadbalancers') loadbalancer_id = listener_lbs[0].get('id') return loadbalancer_id def _get_member_status(self, loadbalancer_id, parent_id): """Method used to get the status of member resources. This method is used to get the status of the member resources belonging to the specific Load Balancer. :param loadbalancer_id: The ID of the Load Balancer. :param parent_id: The parent ID list of the member resource. For the member resource, the parent_id should be [listener_id, pool_id]. :returns: The status dictionary of the member resource. The key is the ID of the member. The value is the operating status of the member resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # the corresponding apis, this will change to use the new # method to get the status of the members. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: listener_id = listener_status.get('id') if listener_id == parent_id[0]: pools_status = listener_status.get('pools') for pool_status in pools_status: if pool_status.get('id') == parent_id[1]: members_status = pool_status.get('members') for member_status in members_status: key = member_status.get('id') # If the item does not have the property # 'id', skip it. if key is None: continue # The case where the property # 'operating_status' is None is handled in # the method get_sample() in lbaas.py. value = member_status.get('operating_status') status_dict[key] = value break break return status_dict def _get_listener_status(self, loadbalancer_id): """Method used to get the status of the listener resources. This method is used to get the status of the listener resources belonging to the specific Load Balancer. :param loadbalancer_id: The ID of the Load Balancer. :returns: The status dictionary of the listener resource. The key is the ID of the listener resource. The value is the operating status of the listener resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # the corresponding apis, this will change to use the new # method to get the status of the listeners. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: key = listener_status.get('id') # If the item does not have the property 'id', skip # it. if key is None: continue # The case where the property # 'operating_status' is None is handled in # the method get_sample() in lbaas.py. value = listener_status.get('operating_status') status_dict[key] = value return status_dict def _get_pool_status(self, loadbalancer_id, parent_id): """Method used to get the status of pool resources. This method is used to get the status of the pool resources belonging to the specific Load Balancer.
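        The statuses are resolved by walking the load balancer's status
        tree: the listener statuses are scanned for the given parent
        listener and the pool statuses beneath it are collected.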
:param loadbalancer_id: The ID of the Load Balancer. :param parent_id: The parent ID of the pool resource. :returns: The status dictionary of the pool resource. The key is the ID of the pool resource. The value is the operating status of the pool resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # the corresponding apis, this will change to use the new # method to get the status of the pools. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: listener_id = listener_status.get('id') if listener_id == parent_id: pools_status = listener_status.get('pools') for pool_status in pools_status: key = pool_status.get('id') # If the item does not have the property 'id', skip # it. if key is None: continue # The case where the property # 'operating_status' is None is handled in # the method get_sample() in lbaas.py. value = pool_status.get('operating_status') status_dict[key] = value break return status_dict def list_listener(self): """This method is used to get the list of the listeners.""" resp = self.client.list_listeners() resources = resp.get('listeners') for listener in resources: loadbalancer_id = listener.get('loadbalancers')[0].get('id') status = self._get_listener_status(loadbalancer_id) listener['operating_status'] = status[listener.get('id')] return resources def list_loadbalancer(self): """This method is used to get the list of the loadbalancers.""" resp = self.client.list_loadbalancers() resources = resp.get('loadbalancers') return resources def get_loadbalancer_stats(self, loadbalancer_id): """This method is used to get the statistics of the loadbalancer. :param loadbalancer_id: the ID of the specified loadbalancer """ resp = self.client.retrieve_loadbalancer_stats(loadbalancer_id) resource = resp.get('stats') return resource ceilometer-6.0.0/ceilometer/middleware.py0000664000567000056710000000511712701406223021626 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from oslo_config import cfg import oslo_messaging from ceilometer.agent import plugin_base from ceilometer import sample cfg.CONF.import_opt('nova_control_exchange', 'ceilometer.compute.notifications') cfg.CONF.import_opt('glance_control_exchange', 'ceilometer.notification') cfg.CONF.import_opt('neutron_control_exchange', 'ceilometer.network.notifications') cfg.CONF.import_opt('cinder_control_exchange', 'ceilometer.notification') OPTS = [ cfg.MultiStrOpt('http_control_exchanges', default=[cfg.CONF.nova_control_exchange, cfg.CONF.glance_control_exchange, cfg.CONF.neutron_control_exchange, cfg.CONF.cinder_control_exchange], help="Exchange names to listen to for notifications."), ] cfg.CONF.register_opts(OPTS) class HTTPRequest(plugin_base.NotificationBase, plugin_base.NonMetricNotificationBase): event_types = ['http.request'] def get_targets(self, conf): """Return a sequence of oslo_messaging.Target This sequence defines the exchange and topics to be connected to for this plugin. """ return [oslo_messaging.Target(topic=topic, exchange=exchange) for topic in self.get_notification_topics(conf) for exchange in conf.http_control_exchanges] def process_notification(self, message): yield sample.Sample.from_notification( name=message['event_type'], type=sample.TYPE_DELTA, volume=1, unit=message['event_type'].split('.')[1], user_id=message['payload']['request'].get('HTTP_X_USER_ID'), project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), resource_id=message['payload']['request'].get( 'HTTP_X_SERVICE_NAME'), message=message) class HTTPResponse(HTTPRequest): event_types = ['http.response'] ceilometer-6.0.0/ceilometer/transformer/0000775000567000056710000000000012701406364021503 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/transformer/arithmetic.py0000664000567000056710000001355212701406224024207 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import keyword import math import re from oslo_log import log import six from ceilometer.i18n import _ from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class ArithmeticTransformer(transformer.TransformerBase): """Multi meter arithmetic transformer. Transformer that performs arithmetic operations over one or more meters and/or their metadata.
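    An illustrative pipeline configuration (the meter names and the derived
    meter are assumptions of this sketch) that uses the $(meter) expression
    syntax parsed below to compute a memory utilisation percentage:

        transformers:
            - name: "arithmetic"
              parameters:
                target:
                    name: "memory_util"
                    unit: "%"
                    type: "gauge"
                    expr: "100 * $(memory.usage) / $(memory)"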
""" grouping_keys = ['resource_id'] meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)') def __init__(self, target=None, **kwargs): super(ArithmeticTransformer, self).__init__(**kwargs) target = target or {} self.target = target self.expr = target.get('expr', '') self.expr_escaped, self.escaped_names = self.parse_expr(self.expr) self.required_meters = list(self.escaped_names.values()) self.misconfigured = len(self.required_meters) == 0 if not self.misconfigured: self.reference_meter = self.required_meters[0] # convert to set for more efficient contains operation self.required_meters = set(self.required_meters) self.cache = collections.defaultdict(dict) self.latest_timestamp = None else: LOG.warning(_('Arithmetic transformer must use at least one' ' meter in expression \'%s\''), self.expr) def _update_cache(self, _sample): """Update the cache with the latest sample.""" escaped_name = self.escaped_names.get(_sample.name, '') if escaped_name not in self.required_meters: return self.cache[_sample.resource_id][escaped_name] = _sample def _check_requirements(self, resource_id): """Check if all the required meters are available in the cache.""" return len(self.cache[resource_id]) == len(self.required_meters) def _calculate(self, resource_id): """Evaluate the expression and return a new sample if successful.""" ns_dict = dict((m, s.as_dict()) for m, s in six.iteritems(self.cache[resource_id])) ns = transformer.Namespace(ns_dict) try: new_volume = eval(self.expr_escaped, {}, ns) if math.isnan(new_volume): raise ArithmeticError(_('Expression evaluated to ' 'a NaN value!')) reference_sample = self.cache[resource_id][self.reference_meter] return sample.Sample( name=self.target.get('name', reference_sample.name), unit=self.target.get('unit', reference_sample.unit), type=self.target.get('type', reference_sample.type), volume=float(new_volume), user_id=reference_sample.user_id, project_id=reference_sample.project_id, resource_id=reference_sample.resource_id, timestamp=self.latest_timestamp, resource_metadata=reference_sample.resource_metadata ) except Exception as e: LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'), {'expr': self.expr, 'exc': e}) def handle_sample(self, context, _sample): self._update_cache(_sample) self.latest_timestamp = _sample.timestamp def flush(self, context): new_samples = [] cache_clean_list = [] if not self.misconfigured: for resource_id in self.cache: if self._check_requirements(resource_id): new_samples.append(self._calculate(resource_id)) cache_clean_list.append(resource_id) for res_id in cache_clean_list: self.cache.pop(res_id) return new_samples @classmethod def parse_expr(cls, expr): """Transforms meter names in the expression into valid identifiers. :param expr: unescaped expression :return: A tuple of the escaped expression and a dict representing the translation of meter names into Python identifiers """ class Replacer(object): """Replaces matched meter names with escaped names. If the meter name is not followed by parameter access in the expression, it defaults to accessing the 'volume' parameter. """ def __init__(self, original_expr): self.original_expr = original_expr self.escaped_map = {} def __call__(self, match): meter_name = match.group(1) escaped_name = self.escape(meter_name) self.escaped_map[meter_name] = escaped_name if (match.end(0) == len(self.original_expr) or self.original_expr[match.end(0)] != '.'): escaped_name += '.volume' return escaped_name @staticmethod def escape(name): has_dot = '.' 
in name if has_dot: name = name.replace('.', '_') if has_dot or name.endswith('ESC') or name in keyword.kwlist: name = "_" + name + '_ESC' return name replacer = Replacer(expr) expr = re.sub(cls.meter_name_re, replacer, expr) return expr, replacer.escaped_map ceilometer-6.0.0/ceilometer/transformer/__init__.py0000664000567000056710000000466512701406224023620 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import six @six.add_metaclass(abc.ABCMeta) class TransformerBase(object): """Base class for plugins that transform the sample.""" def __init__(self, **kwargs): """Set up the transformer. Each time a transformer is involved in a pipeline, a new transformer instance is created and chained into the pipeline, i.e. a transformer instance is per pipeline. This helps if a transformer needs to keep a cache or other per-pipeline information. :param kwargs: The parameters that are defined in the pipeline config file. """ super(TransformerBase, self).__init__() @abc.abstractmethod def handle_sample(self, context, sample): """Transform a sample. :param context: Passed from the data collector. :param sample: A sample. """ @abc.abstractproperty def grouping_keys(self): """Keys used to group transformers.""" def flush(self, context): """Flush samples cached previously. :param context: Passed from the data collector. """ return [] class Namespace(object): """Encapsulates the namespace. Encapsulation is done by wrapping the evaluation of the configured rule. This allows nested dicts to be accessed in the attribute style, and missing attributes to yield false when used in a boolean expression. """ def __init__(self, seed): self.__dict__ = collections.defaultdict(lambda: Namespace({})) self.__dict__.update(seed) for k, v in six.iteritems(self.__dict__): if isinstance(v, dict): self.__dict__[k] = Namespace(v) def __getattr__(self, attr): return self.__dict__[attr] def __getitem__(self, key): return self.__dict__[key] def __nonzero__(self): return len(self.__dict__) > 0 __bool__ = __nonzero__ ceilometer-6.0.0/ceilometer/transformer/accumulator.py0000664000567000056710000000247412701406224024376 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import transformer class TransformerAccumulator(transformer.TransformerBase): """Transformer that accumulates samples until a threshold, and then flushes them out into the wild.
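    For example, an illustrative pipeline entry such as

        transformers:
            - name: "accumulator"
              parameters:
                size: 15

    buffers incoming samples and only releases them from flush() once at
    least fifteen have been collected (the 'accumulator' entry-point name
    is assumed here).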
""" grouping_keys = ['resource_id'] def __init__(self, size=1, **kwargs): if size >= 1: self.samples = [] self.size = size super(TransformerAccumulator, self).__init__(**kwargs) def handle_sample(self, context, sample): if self.size >= 1: self.samples.append(sample) else: return sample def flush(self, context): if len(self.samples) >= self.size: x = self.samples self.samples = [] return x return [] ceilometer-6.0.0/ceilometer/transformer/conversions.py0000664000567000056710000003143112701406224024422 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _, _LW from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class BaseConversionTransformer(transformer.TransformerBase): """Transformer to derive conversion.""" grouping_keys = ['resource_id'] def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ source = source or {} target = target or {} self.source = source self.target = target super(BaseConversionTransformer, self).__init__(**kwargs) def _map(self, s, attr): """Apply the name or unit mapping if configured.""" mapped = None from_ = self.source.get('map_from') to_ = self.target.get('map_to') if from_ and to_: if from_.get(attr) and to_.get(attr): try: mapped = re.sub(from_[attr], to_[attr], getattr(s, attr)) except Exception: pass return mapped or self.target.get(attr, getattr(s, attr)) class DeltaTransformer(BaseConversionTransformer): """Transformer based on the delta of a sample volume.""" def __init__(self, target=None, growth_only=False, **kwargs): """Initialize transformer with configured parameters. :param growth_only: capture only positive deltas """ super(DeltaTransformer, self).__init__(target=target, **kwargs) self.growth_only = growth_only self.cache = {} def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp) if prev: prev_volume = prev[0] prev_timestamp = prev[1] time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning(_LW('Dropping out of time order sample: %s'), (s,)) # Reset the cache to the newer sample. 
self.cache[key] = prev return None volume_delta = s.volume - prev_volume if self.growth_only and volume_delta < 0: LOG.warning(_LW('Negative delta detected, dropping value')) s = None else: s = self._convert(s, volume_delta) LOG.debug('Converted to: %s', s) else: LOG.warning(_LW('Dropping sample with no predecessor: %s'), (s,)) s = None return s def _convert(self, s, delta): """Transform the appropriate sample fields.""" return sample.Sample( name=self._map(s, 'name'), unit=s.unit, type=sample.TYPE_DELTA, volume=delta, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) class ScalingTransformer(BaseConversionTransformer): """Transformer to apply a scaling conversion.""" def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ super(ScalingTransformer, self).__init__(source=source, target=target, **kwargs) self.scale = self.target.get('scale') LOG.debug('scaling conversion transformer with source:' ' %(source)s target: %(target)s:', {'source': self.source, 'target': self.target}) def _scale(self, s): """Apply the scaling factor. Either a straight multiplicative factor or else a string to be eval'd. """ ns = transformer.Namespace(s.as_dict()) scale = self.scale return ((eval(scale, {}, ns) if isinstance(scale, six.string_types) else s.volume * scale) if scale else s.volume) def _convert(self, s, growth=1): """Transform the appropriate sample fields.""" return sample.Sample( name=self._map(s, 'name'), unit=self._map(s, 'unit'), type=self.target.get('type', s.type), volume=self._scale(s) * growth, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) if self.source.get('unit', s.unit) == s.unit: s = self._convert(s) LOG.debug('converted to: %s', s) return s class RateOfChangeTransformer(ScalingTransformer): """Transformer based on the rate of change of a sample volume. For example taking the current and previous volumes of a cumulative sample and producing a gauge value based on the proportion of some maximum used. """ def __init__(self, **kwargs): """Initialize transformer with configured parameters.""" super(RateOfChangeTransformer, self).__init__(**kwargs) self.cache = {} self.scale = self.scale or '1' def handle_sample(self, context, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp) if prev: prev_volume = prev[0] prev_timestamp = prev[1] time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning(_('dropping out of time order sample: %s'), (s,)) # Reset the cache to the newer sample. 
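                # NOTE (added commentary): same arrow-of-time guard as in
                # DeltaTransformer above. For the normal path that follows,
                # a worked example: a cached volume of 100 at time t and a
                # current volume of 400 at t+60s give volume_delta=300 and
                # rate_of_change=5.0 per second.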
self.cache[key] = prev return None # we only allow negative volume deltas for noncumulative # samples, whereas for cumulative we assume that a reset has # occurred in the interim so that the current volume gives a # lower bound on growth volume_delta = (s.volume - prev_volume if (prev_volume <= s.volume or s.type != sample.TYPE_CUMULATIVE) else s.volume) rate_of_change = ((1.0 * volume_delta / time_delta) if time_delta else 0.0) s = self._convert(s, rate_of_change) LOG.debug('converted to: %s', s) else: LOG.warning(_('dropping sample with no predecessor: %s'), (s,)) s = None return s class AggregatorTransformer(ScalingTransformer): """Transformer that aggregates samples. Aggregation goes until a threshold or/and a retention_time, and then flushes them out into the wild. Example: To aggregate sample by resource_metadata and keep the resource_metadata of the latest received sample; AggregatorTransformer(retention_time=60, resource_metadata='last') To aggregate sample by user_id and resource_metadata and keep the user_id of the first received sample and drop the resource_metadata. AggregatorTransformer(size=15, user_id='first', resource_metadata='drop') To keep the timestamp of the last received sample rather than the first: AggregatorTransformer(timestamp="last") """ def __init__(self, size=1, retention_time=None, project_id=None, user_id=None, resource_metadata="last", timestamp="first", **kwargs): super(AggregatorTransformer, self).__init__(**kwargs) self.samples = {} self.counts = collections.defaultdict(int) self.size = int(size) if size else None self.retention_time = float(retention_time) if retention_time else None if not (self.size or self.retention_time): self.size = 1 if timestamp in ["first", "last"]: self.timestamp = timestamp else: self.timestamp = "first" self.initial_timestamp = None self.aggregated_samples = 0 self.key_attributes = [] self.merged_attribute_policy = {} self._init_attribute('project_id', project_id) self._init_attribute('user_id', user_id) self._init_attribute('resource_metadata', resource_metadata, is_droppable=True, mandatory=True) def _init_attribute(self, name, value, is_droppable=False, mandatory=False): drop = ['drop'] if is_droppable else [] if value or mandatory: if value not in ['last', 'first'] + drop: LOG.warning('%s is unknown (%s), using last' % (name, value)) value = 'last' self.merged_attribute_policy[name] = value else: self.key_attributes.append(name) def _get_unique_key(self, s): # NOTE(arezmerita): in samples generated by ceilometer middleware, # when accessing without authentication publicly readable/writable # swift containers, the project_id and the user_id are missing. # They will be replaced by for unique key construction. 
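        # NOTE (added commentary): "replaced by" in the note above refers to
        # the empty string -- missing project_id/user_id values become ''
        # in the key assembled below.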
keys = ['' if getattr(s, f) is None else getattr(s, f) for f in self.key_attributes] non_aggregated_keys = "-".join(keys) # NOTE(sileht): it assumes, a meter always have the same unit/type return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys) def handle_sample(self, context, sample_): if not self.initial_timestamp: self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp) self.aggregated_samples += 1 key = self._get_unique_key(sample_) self.counts[key] += 1 if key not in self.samples: self.samples[key] = self._convert(sample_) if self.merged_attribute_policy[ 'resource_metadata'] == 'drop': self.samples[key].resource_metadata = {} else: if self.timestamp == "last": self.samples[key].timestamp = sample_.timestamp if sample_.type == sample.TYPE_CUMULATIVE: self.samples[key].volume = self._scale(sample_) else: self.samples[key].volume += self._scale(sample_) for field in self.merged_attribute_policy: if self.merged_attribute_policy[field] == 'last': setattr(self.samples[key], field, getattr(sample_, field)) def flush(self, context): if not self.initial_timestamp: return [] expired = (self.retention_time and timeutils.is_older_than(self.initial_timestamp, self.retention_time)) full = self.size and self.aggregated_samples >= self.size if full or expired: x = list(self.samples.values()) # gauge aggregates need to be averages for s in x: if s.type == sample.TYPE_GAUGE: key = self._get_unique_key(s) s.volume /= self.counts[key] self.samples.clear() self.counts.clear() self.aggregated_samples = 0 self.initial_timestamp = None return x return [] ceilometer-6.0.0/ceilometer/hardware/0000775000567000056710000000000012701406364020736 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/pollsters/0000775000567000056710000000000012701406364022765 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/pollsters/data/0000775000567000056710000000000012701406364023676 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/pollsters/data/snmp.yaml0000664000567000056710000001176512701406223025543 0ustar jenkinsjenkins00000000000000--- metric: # cpu - name: hardware.cpu.load.1min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.1" type: "lambda x: float(str(x))" - name: hardware.cpu.load.5min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.2" type: "lambda x: float(str(x))" - name: hardware.cpu.load.15min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.3" type: "lambda x: float(str(x))" - name: hardware.cpu.util unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.9.0" type: "int" # disk - name: hardware.disk.size.total unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.6" type: "int" metadata: &disk_metadata path: oid: "1.3.6.1.4.1.2021.9.1.2" type: "str" device: oid: "1.3.6.1.4.1.2021.9.1.3" type: "str" post_op: "_post_op_disk" - name: hardware.disk.size.used unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.8" type: "int" metadata: *disk_metadata post_op: "_post_op_disk" # memory - name: hardware.memory.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.5.0" type: "int" - name: hardware.memory.used unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.6.0" type: "int" post_op: 
"_post_op_memory_avail_to_used" - name: hardware.memory.swap.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.3.0" type: "int" - name: hardware.memory.swap.avail unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.4.0" type: "int" - name: hardware.memory.buffer unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.14.0" type: "int" - name: hardware.memory.cached unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.15.0" type: "int" # network interface - name: hardware.network.incoming.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.10" type: "int" metadata: &net_metadata name: oid: "1.3.6.1.2.1.2.2.1.2" type: "str" speed: oid: "1.3.6.1.2.1.2.2.1.5" type: "lambda x: int(x) / 8" mac: oid: "1.3.6.1.2.1.2.2.1.6" type: "lambda x: x.prettyPrint().replace('0x', '')" post_op: "_post_op_net" - name: hardware.network.outgoing.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.16" type: "int" metadata: *net_metadata post_op: "_post_op_net" - name: hardware.network.outgoing.errors unit: packet type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.20" type: "int" metadata: *net_metadata post_op: "_post_op_net" #network aggregate - name: hardware.network.ip.outgoing.datagrams unit: datagrams type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.2.1.4.10.0" type: "int" - name: hardware.network.ip.incoming.datagrams unit: datagrams type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.2.1.4.3.0" type: "int" #system stats - name: hardware.system_stats.cpu.idle unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.11.0" type: "int" - name: hardware.system_stats.io.outgoing.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.57.0" type: "int" - name: hardware.system_stats.io.incoming.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.58.0" type: "int" ceilometer-6.0.0/ceilometer/hardware/pollsters/generic.py0000664000567000056710000002127712701406223024756 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import pkg_resources from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import six from ceilometer.agent import plugin_base from ceilometer import declarative from ceilometer.hardware import inspector as insloader from ceilometer.hardware.pollsters import util from ceilometer.i18n import _LE, _LW from ceilometer import sample OPTS = [ cfg.StrOpt('meter_definitions_file', default="snmp.yaml", help="Configuration file for defining hardware snmp meters." 
), ] cfg.CONF.register_opts(OPTS, group='hardware') LOG = log.getLogger(__name__) class MeterDefinitionException(Exception): def __init__(self, message, definition_cfg): super(MeterDefinitionException, self).__init__(message) self.message = message self.definition_cfg = definition_cfg def __str__(self): return '%s %s: %s' % (self.__class__.__name__, self.definition_cfg, self.message) class MeterDefinition(object): required_fields = ['name', 'unit', 'type'] def __init__(self, definition_cfg): self.cfg = definition_cfg for fname, fval in self.cfg.items(): if (isinstance(fname, six.string_types) and (fname in self.required_fields or fname.endswith('_inspector'))): setattr(self, fname, fval) else: LOG.warning(_LW("Ignore unrecognized field %s"), fname) for fname in self.required_fields: if not getattr(self, fname, None): raise MeterDefinitionException( _LE("Missing field %s") % fname, self.cfg) if self.type not in sample.TYPES: raise MeterDefinitionException( _LE("Unrecognized type value %s") % self.type, self.cfg) class GenericHardwareDeclarativePollster(plugin_base.PollsterBase): CACHE_KEY = 'hardware.generic' mapping = None def __init__(self): super(GenericHardwareDeclarativePollster, self).__init__() self.inspectors = {} def _update_meter_definition(self, definition): self.meter_definition = definition self.cached_inspector_params = {} @property def default_discovery(self): return 'tripleo_overcloud_nodes' @staticmethod def _parse_resource(res): """Parse resource from discovery. Either URL can be given or dict. Dict has to contain at least keys 'resource_id' and 'resource_url', all the dict keys will be stored as metadata. :param res: URL or dict containing all resource info. :return parsed_url, resource_id, metadata: Returns parsed URL used for SNMP query, unique identifier of the resource and metadata of the resource. """ parsed_url, resource_id, metadata = (None, None, None) if isinstance(res, dict): if 'resource_url' not in res or 'resource_id' not in res: LOG.error(_LE('Passed resource dict must contain keys ' 'resource_id and resource_url.')) else: metadata = res parsed_url = netutils.urlsplit(res['resource_url']) resource_id = res['resource_id'] else: metadata = {} parsed_url = netutils.urlsplit(res) resource_id = res return parsed_url, resource_id, metadata def _get_inspector(self, parsed_url): if parsed_url.scheme not in self.inspectors: try: driver = insloader.get_inspector(parsed_url) self.inspectors[parsed_url.scheme] = driver except Exception as err: LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"), dict(name=parsed_url.scheme, err=err)) raise err return self.inspectors[parsed_url.scheme] def get_samples(self, manager, cache, resources=None): """Return an iterable of Sample instances from polling the resources. :param manager: The service manager invoking the plugin :param cache: A dictionary for passing data between plugins :param resources: end point to poll data from """ resources = resources or [] h_cache = cache.setdefault(self.CACHE_KEY, {}) sample_iters = [] # Get the meter identifiers to poll identifier = self.meter_definition.name for resource in resources: parsed_url, res, extra_metadata = self._parse_resource(resource) if parsed_url is None: LOG.error(_LE("Skip invalid resource %s"), resource) continue ins = self._get_inspector(parsed_url) try: # Call hardware inspector to poll for the data i_cache = h_cache.setdefault(res, {}) # Prepare inspector parameters and cache it for performance param_key = parsed_url.scheme + '.' 
+ identifier inspector_param = self.cached_inspector_params.get(param_key) if not inspector_param: param = getattr(self.meter_definition, parsed_url.scheme + '_inspector', {}) inspector_param = ins.prepare_params(param) self.cached_inspector_params[param_key] = inspector_param if identifier not in i_cache: i_cache[identifier] = list(ins.inspect_generic( host=parsed_url, cache=i_cache, extra_metadata=extra_metadata, param=inspector_param)) # Generate samples if i_cache[identifier]: sample_iters.append(self.generate_samples( parsed_url, i_cache[identifier])) except Exception as err: LOG.exception(_LE('inspector call failed for %(ident)s ' 'host %(host)s: %(err)s'), dict(ident=identifier, host=parsed_url.hostname, err=err)) return itertools.chain(*sample_iters) def generate_samples(self, host_url, data): """Generate a list of Sample from the data returned by inspector :param host_url: host url of the endpoint :param data: list of data returned by the corresponding inspector """ samples = [] definition = self.meter_definition for (value, metadata, extra) in data: s = util.make_sample_from_host(host_url, name=definition.name, sample_type=definition.type, unit=definition.unit, volume=value, res_metadata=metadata, extra=extra, name_prefix=None) samples.append(s) return samples @classmethod def build_pollsters(cls): if not cls.mapping: definition_cfg = declarative.load_definitions( {}, cfg.CONF.hardware.meter_definitions_file, pkg_resources.resource_filename(__name__, "data/snmp.yaml")) cls.mapping = load_definition(definition_cfg) pollsters = [] for name in cls.mapping: pollster = cls() pollster._update_meter_definition(cls.mapping[name]) pollsters.append((name, pollster)) return pollsters def load_definition(config_def): mappings = {} for meter_def in config_def.get('metric', []): try: meter = MeterDefinition(meter_def) mappings[meter.name] = meter except MeterDefinitionException as me: errmsg = (_LE("Error loading meter definition : %(err)s") % dict(err=me.message)) LOG.error(errmsg) return mappings ceilometer-6.0.0/ceilometer/hardware/pollsters/__init__.py0000664000567000056710000000000012701406223025056 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/pollsters/util.py0000664000567000056710000000420312701406223024305 0ustar jenkinsjenkins00000000000000# # Copyright 2013 ZHAW SoE # Copyright 2014 Intel Corp. # # Authors: Lucas Graf # Toni Zehnder # Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
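# NOTE (added commentary, illustrative only -- the URL and meter name below
# are hypothetical): make_sample_from_host() is typically driven by the
# generic hardware pollster, e.g.
#
#     url = netutils.urlsplit('snmp://ro_snmp_user:password@10.0.0.5')
#     s = make_sample_from_host(url, name='cpu.util', sample_type='gauge',
#                               unit='%', volume=42)
#
# which yields a Sample named 'hardware.cpu.util' whose resource_id
# defaults to the host name ('10.0.0.5') and whose resource_metadata
# records the resource URL.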
import copy from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from ceilometer import sample def get_metadata_from_host(host_url): return {'resource_url': urlparse.urlunsplit(host_url)} def make_resource_metadata(res_metadata=None, host_url=None): resource_metadata = dict() if res_metadata is not None: metadata = copy.copy(res_metadata) resource_metadata.update(metadata) resource_metadata.update(get_metadata_from_host(host_url)) return resource_metadata def make_sample_from_host(host_url, name, sample_type, unit, volume, project_id=None, user_id=None, resource_id=None, res_metadata=None, extra=None, name_prefix='hardware'): extra = extra or {} resource_metadata = make_resource_metadata(res_metadata, host_url) resource_metadata.update(extra) res_id = resource_id or extra.get('resource_id') or host_url.hostname if name_prefix: name = name_prefix + '.' + name return sample.Sample( name=name, type=sample_type, unit=unit, volume=volume, user_id=user_id or extra.get('user_id'), project_id=project_id or extra.get('project_id'), resource_id=res_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=resource_metadata, source='hardware', ) ceilometer-6.0.0/ceilometer/hardware/__init__.py0000664000567000056710000000000012701406223023027 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/inspector/0000775000567000056710000000000012701406364022744 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/hardware/inspector/snmp.py0000664000567000056710000003020412701406223024264 0ustar jenkinsjenkins00000000000000# # Copyright 2014 ZHAW SoE # Copyright 2014 Intel Corp # # Authors: Lucas Graf # Toni Zehnder # Lianhao Lu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector for collecting data over SNMP""" import copy from pysnmp.entity.rfc3413.oneliner import cmdgen import six from ceilometer.hardware.inspector import base class SNMPException(Exception): pass def parse_snmp_return(ret, is_bulk=False): """Check the return value of snmp operations :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data) returned by pysnmp :param is_bulk: True if the ret value is from GetBulkRequest :return: a tuple of (err, data) err: True if error found, or False if no error found data: a string of error description if error found, or the actual return data of the snmp operation """ err = True (errIndication, errStatus, errIdx, varBinds) = ret if errIndication: data = errIndication elif errStatus: if is_bulk: varBinds = varBinds[-1] data = "%s at %s" % (errStatus.prettyPrint(), errIdx and varBinds[int(errIdx) - 1] or "?") else: err = False data = varBinds return err, data EXACT = 'type_exact' PREFIX = 'type_prefix' class SNMPInspector(base.Inspector): # Default port _port = 161 _CACHE_KEY_OID = "snmp_cached_oid" # NOTE: The following mapping has been moved to the yaml file identified # by the config options hardware.meter_definitions_file. However, we still # keep the description here for code reading purpose. 
""" The following mapping define how to construct (value, metadata, extra) returned by inspect_generic MAPPING = { 'identifier: { 'matching_type': EXACT or PREFIX, 'metric_oid': (oid, value_converter) 'metadata': { metadata_name1: (oid1, value_converter), metadata_name2: (oid2, value_converter), }, 'post_op': special func to modify the return data, }, } For matching_type of EXACT, each item in the above mapping will return exact one (value, metadata, extra) tuple. The value would be returned from SNMP request GetRequest for oid of 'metric_oid', the metadata dict would be constructed based on the returning from SNMP GetRequest for oids of 'metadata'. For matching_type of PREFIX, SNMP request GetBulkRequest would be sent to get values for oids of 'metric_oid' and 'metadata' of each item in the above mapping. And each item might return multiple (value, metadata, extra) tuples, e.g. Suppose we have the following mapping: MAPPING = { 'disk.size.total': { 'matching_type': PREFIX, 'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int) 'metadata': { 'device': ("1.3.6.1.4.1.2021.9.1.3", str), 'path': ("1.3.6.1.4.1.2021.9.1.2", str), }, 'post_op': None, }, and the SNMP have the following oid/value(s): { '1.3.6.1.4.1.2021.9.1.6.1': 19222656, '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2", '1.3.6.1.4.1.2021.9.1.2.1': "/" '1.3.6.1.4.1.2021.9.1.6.2': 808112, '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs", '1.3.6.1.4.1.2021.9.1.2.2': "/run", } So here we'll return 2 instances of (value, metadata, extra): (19222656, {'device': "/dev/sda2", 'path': "/"}, None) (808112, {'device': "tmpfs", 'path': "/run"}, None) The post_op is assumed to be implemented by new metric developer. It could be used to add additional special metadata(e.g. ip address), or it could be used to add information into extra dict to be returned to construct the pollster how to build final sample, e.g. 
extra.update('project_id': xy, 'user_id': zw) """ def __init__(self): super(SNMPInspector, self).__init__() self._cmdGen = cmdgen.CommandGenerator() def _query_oids(self, host, oids, cache, is_bulk): # send GetRequest or GetBulkRequest to get oids values and # populate the values into cache authData = self._get_auth_strategy(host) transport = cmdgen.UdpTransportTarget((host.hostname, host.port or self._port)) oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) if is_bulk: ret = self._cmdGen.bulkCmd(authData, transport, 0, 100, *oids, lookupValues=True) else: ret = self._cmdGen.getCmd(authData, transport, *oids, lookupValues=True) (error, data) = parse_snmp_return(ret, is_bulk) if error: raise SNMPException("An error occurred, oids %(oid)s, " "host %(host)s, %(err)s" % dict(oid=oids, host=host.hostname, err=data)) # save result into cache if is_bulk: for var_bind_table_row in data: for name, val in var_bind_table_row: oid_cache[str(name)] = val else: for name, val in data: oid_cache[str(name)] = val @staticmethod def find_matching_oids(oid_cache, oid, match_type, find_one=True): matched = [] if match_type == PREFIX: for key in oid_cache.keys(): if key.startswith(oid): matched.append(key) if find_one: break else: if oid in oid_cache: matched.append(oid) return matched @staticmethod def get_oid_value(oid_cache, oid_def, suffix=''): oid, converter = oid_def value = oid_cache[oid + suffix] if converter: value = converter(value) return value @classmethod def construct_metadata(cls, oid_cache, meta_defs, suffix=''): metadata = {} for key, oid_def in six.iteritems(meta_defs): metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix) return metadata @classmethod def _find_missing_oids(cls, meter_def, cache): # find oids have not been queried and cached new_oids = [] oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {}) # check metric_oid if not cls.find_matching_oids(oid_cache, meter_def['metric_oid'][0], meter_def['matching_type']): new_oids.append(meter_def['metric_oid'][0]) for metadata in meter_def['metadata'].values(): if not cls.find_matching_oids(oid_cache, metadata[0], meter_def['matching_type']): new_oids.append(metadata[0]) return new_oids def inspect_generic(self, host, cache, extra_metadata, param): # the snmp definition for the corresponding meter meter_def = param # collect oids that needs to be queried oids_to_query = self._find_missing_oids(meter_def, cache) # query oids and populate into caches if oids_to_query: self._query_oids(host, oids_to_query, cache, meter_def['matching_type'] == PREFIX) # construct (value, metadata, extra) oid_cache = cache[self._CACHE_KEY_OID] # find all oids which needed to construct final sample values # for matching type of EXACT, only 1 sample would be generated # for matching type of PREFIX, multiple samples could be generated oids_for_sample_values = self.find_matching_oids( oid_cache, meter_def['metric_oid'][0], meter_def['matching_type'], False) input_extra_metadata = extra_metadata for oid in oids_for_sample_values: suffix = oid[len(meter_def['metric_oid'][0]):] value = self.get_oid_value(oid_cache, meter_def['metric_oid'], suffix) # get the metadata for this sample value metadata = self.construct_metadata(oid_cache, meter_def['metadata'], suffix) extra_metadata = copy.deepcopy(input_extra_metadata) or {} # call post_op for special cases if meter_def['post_op']: func = getattr(self, meter_def['post_op'], None) if func: value = func(host, cache, meter_def, value, metadata, extra_metadata, suffix) yield (value, metadata, extra_metadata) def 
_post_op_memory_avail_to_used(self, host, cache, meter_def, value, metadata, extra, suffix): _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0" if _memory_total_oid not in cache[self._CACHE_KEY_OID]: self._query_oids(host, [_memory_total_oid], cache, False) value = int(cache[self._CACHE_KEY_OID][_memory_total_oid]) - value return value def _post_op_net(self, host, cache, meter_def, value, metadata, extra, suffix): # add ip address into metadata _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2" oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) if not self.find_matching_oids(oid_cache, _interface_ip_oid, PREFIX): # populate the oid into cache self._query_oids(host, [_interface_ip_oid], cache, True) ip_addr = '' for k, v in six.iteritems(oid_cache): if k.startswith(_interface_ip_oid) and v == int(suffix[1:]): ip_addr = k.replace(_interface_ip_oid + ".", "") metadata.update(ip=ip_addr) # update resource_id for each nic interface self._suffix_resource_id(host, metadata, 'name', extra) return value def _post_op_disk(self, host, cache, meter_def, value, metadata, extra, suffix): self._suffix_resource_id(host, metadata, 'device', extra) return value @staticmethod def _suffix_resource_id(host, metadata, key, extra): prefix = metadata.get(key) if prefix: res_id = extra.get('resource_id') or host.hostname res_id = res_id + ".%s" % metadata.get(key) extra.update(resource_id=res_id) @staticmethod def _get_auth_strategy(host): if host.password: auth_strategy = cmdgen.UsmUserData(host.username, authKey=host.password) else: auth_strategy = cmdgen.CommunityData(host.username or 'public') return auth_strategy def prepare_params(self, param): processed = {} processed['matching_type'] = param['matching_type'] processed['metric_oid'] = (param['oid'], eval(param['type'])) processed['post_op'] = param.get('post_op', None) processed['metadata'] = {} for k, v in six.iteritems(param.get('metadata', {})): processed['metadata'][k] = (v['oid'], eval(v['type'])) return processed ceilometer-6.0.0/ceilometer/hardware/inspector/__init__.py0000664000567000056710000000171112701406223025047 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stevedore import driver def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'): """Get inspector driver and load it. :param parsed_url: urlparse.SplitResult object for the inspector :param namespace: Namespace to use to look for drivers. """ loaded_driver = driver.DriverManager(namespace, parsed_url.scheme) return loaded_driver.driver() ceilometer-6.0.0/ceilometer/hardware/inspector/base.py0000664000567000056710000000324312701406223024224 0ustar jenkinsjenkins00000000000000# # Copyright 2014 ZHAW SoE # # Authors: Lucas Graf # Toni Zehnder # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hardware components""" import abc import six @six.add_metaclass(abc.ABCMeta) class Inspector(object): @abc.abstractmethod def inspect_generic(self, host, cache, extra_metadata, param): """A generic inspect function. :param host: the target host :param cache: cache passed from the pollster :param extra_metadata: extra dict to be used as metadata :param param: a dict of inspector specific param :return: an iterator of (value, metadata, extra) :return value: the sample value :return metadata: dict to construct sample's metadata :return extra: dict of extra metadata to help constructing sample """ def prepare_params(self, param): """Parse the params to a format which the inspector itself recognizes. :param param: inspector params from meter definition file :return: a dict of param which the inspector recognized """ return {} ceilometer-6.0.0/ceilometer/hardware/discovery.py0000664000567000056710000000704112701406223023313 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _ from ceilometer import nova_client LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('url_scheme', default='snmp://', help='URL scheme to use for hardware nodes.'), cfg.StrOpt('readonly_user_name', default='ro_snmp_user', help='SNMPd user name of all nodes running in the cloud.'), cfg.StrOpt('readonly_user_password', default='password', help='SNMPd password of all the nodes running in the cloud.', secret=True), ] cfg.CONF.register_opts(OPTS, group='hardware') class NodesDiscoveryTripleO(plugin_base.DiscoveryBase): def __init__(self): super(NodesDiscoveryTripleO, self).__init__() self.nova_cli = nova_client.Client() self.last_run = None self.instances = {} @staticmethod def _address(instance, field): return instance.addresses['ctlplane'][0].get(field) def discover(self, manager, param=None): """Discover resources to monitor. instance_get_all will return all instances if last_run is None, and will return only the instances changed since the last_run time. """ try: instances = self.nova_cli.instance_get_all(self.last_run) except Exception: # NOTE(zqfan): instance_get_all is wrapped and will log exception # when there is any error. It is no need to raise it again and # print one more time. 
return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance self.last_run = timeutils.utcnow(True).isoformat() resources = [] for instance in self.instances.values(): try: ip_address = self._address(instance, 'addr') final_address = ( cfg.CONF.hardware.url_scheme + cfg.CONF.hardware.readonly_user_name + ':' + cfg.CONF.hardware.readonly_user_password + '@' + ip_address) resource = { 'resource_id': instance.id, 'resource_url': final_address, 'mac_addr': self._address(instance, 'OS-EXT-IPS-MAC:mac_addr'), 'image_id': instance.image['id'], 'flavor_id': instance.flavor['id'] } resources.append(resource) except KeyError: LOG.error(_("Couldn't obtain IP address of " "instance %s") % instance.id) return resources ceilometer-6.0.0/ceilometer/storage/0000775000567000056710000000000012701406364020605 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/impl_sqlalchemy.py0000664000567000056710000010672712701406224024352 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" from __future__ import absolute_import import datetime import hashlib import os from oslo_config import cfg from oslo_db import api from oslo_db import exception as dbexc from oslo_db.sqlalchemy import session as db_session from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils import six import sqlalchemy as sa from sqlalchemy import and_ from sqlalchemy import distinct from sqlalchemy import func from sqlalchemy.orm import aliased from sqlalchemy.sql.expression import cast import ceilometer from ceilometer.i18n import _, _LI from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models as api_models from ceilometer.storage.sqlalchemy import models from ceilometer.storage.sqlalchemy import utils as sql_utils from ceilometer import utils LOG = log.getLogger(__name__) STANDARD_AGGREGATES = dict( avg=func.avg(models.Sample.volume).label('avg'), sum=func.sum(models.Sample.volume).label('sum'), min=func.min(models.Sample.volume).label('min'), max=func.max(models.Sample.volume).label('max'), count=func.count(models.Sample.volume).label('count') ) UNPARAMETERIZED_AGGREGATES = dict( stddev=func.stddev_pop(models.Sample.volume).label('stddev') ) PARAMETERIZED_AGGREGATES = dict( validate=dict( cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id'] ), compute=dict( cardinality=lambda p: func.count( distinct(getattr(models.Resource, p)) ).label('cardinality/%s' % p) ) ) AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'resources': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 
'avg': True, 'count': True, 'stddev': True, 'cardinality': True}} }, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } def apply_metaquery_filter(session, query, metaquery): """Apply provided metaquery filter to existing query. :param session: session used for original query :param query: Query instance :param metaquery: dict with metadata to match on. """ for k, value in six.iteritems(metaquery): key = k[9:] # strip out 'metadata.' prefix try: _model = sql_utils.META_TYPE_MAP[type(value)] except KeyError: raise ceilometer.NotImplementedError( 'Query on %(key)s is of %(value)s ' 'type and is not supported' % {"key": k, "value": type(value)}) else: meta_alias = aliased(_model) on_clause = and_(models.Resource.internal_id == meta_alias.id, meta_alias.meta_key == key) # outer join is needed to support metaquery # with or operator on non existent metadata field # see: test_query_non_existing_metadata_with_result # test case. query = query.outerjoin(meta_alias, on_clause) query = query.filter(meta_alias.value == value) return query def make_query_from_filter(session, query, sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param session: session used for original query :param query: Query instance :param sample_filter: SampleFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. """ if sample_filter.meter: query = query.filter(models.Meter.name == sample_filter.meter) elif require_meter: raise RuntimeError('Missing required meter specifier') if sample_filter.source: query = query.filter( models.Resource.source_id == sample_filter.source) if sample_filter.start_timestamp: ts_start = sample_filter.start_timestamp if sample_filter.start_timestamp_op == 'gt': query = query.filter(models.Sample.timestamp > ts_start) else: query = query.filter(models.Sample.timestamp >= ts_start) if sample_filter.end_timestamp: ts_end = sample_filter.end_timestamp if sample_filter.end_timestamp_op == 'le': query = query.filter(models.Sample.timestamp <= ts_end) else: query = query.filter(models.Sample.timestamp < ts_end) if sample_filter.user: if sample_filter.user == 'None': sample_filter.user = None query = query.filter(models.Resource.user_id == sample_filter.user) if sample_filter.project: if sample_filter.project == 'None': sample_filter.project = None query = query.filter( models.Resource.project_id == sample_filter.project) if sample_filter.resource: query = query.filter( models.Resource.resource_id == sample_filter.resource) if sample_filter.message_id: query = query.filter( models.Sample.message_id == sample_filter.message_id) if sample_filter.metaquery: query = apply_metaquery_filter(session, query, sample_filter.metaquery) return query class Connection(base.Connection): """Put the data into a SQLAlchemy database. 
Tables:: - meter - meter definition - { id: meter id name: meter name type: meter type unit: meter unit } - resource - resource definition - { internal_id: resource id resource_id: resource uuid user_id: user uuid project_id: project uuid source_id: source id resource_metadata: metadata dictionary metadata_hash: metadata dictionary hash } - sample - the raw incoming data - { id: sample id meter_id: meter id (->meter.id) resource_id: resource id (->resource.internal_id) volume: sample volume timestamp: datetime recorded_at: datetime message_signature: message signature message_id: message uuid } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, url): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function options = dict(cfg.CONF.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Ceilometer for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): # NOTE(gordc): to minimise memory, only import migration when needed from oslo_db.sqlalchemy import migration path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sqlalchemy', 'migrate_repo') migration.db_sync(self._engine_facade.get_engine(), path) def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): engine.execute(table.delete()) engine.dispose() @staticmethod def _create_meter(conn, name, type, unit): # TODO(gordc): implement lru_cache to improve performance try: meter = models.Meter.__table__ trans = conn.begin_nested() if conn.dialect.name == 'sqlite': trans = conn.begin() with trans: meter_row = conn.execute( sa.select([meter.c.id]) .where(sa.and_(meter.c.name == name, meter.c.type == type, meter.c.unit == unit))).first() meter_id = meter_row[0] if meter_row else None if meter_id is None: result = conn.execute(meter.insert(), name=name, type=type, unit=unit) meter_id = result.inserted_primary_key[0] except dbexc.DBDuplicateEntry: # retry function to pick up duplicate committed object meter_id = Connection._create_meter(conn, name, type, unit) return meter_id @staticmethod def _create_resource(conn, res_id, user_id, project_id, source_id, rmeta): # TODO(gordc): implement lru_cache to improve performance try: res = models.Resource.__table__ m_hash = jsonutils.dumps(rmeta, sort_keys=True) if six.PY3: m_hash = m_hash.encode('utf-8') m_hash = hashlib.md5(m_hash).hexdigest() trans = conn.begin_nested() if conn.dialect.name == 'sqlite': trans = conn.begin() with trans: res_row = conn.execute( sa.select([res.c.internal_id]) .where(sa.and_(res.c.resource_id == res_id, res.c.user_id == user_id, res.c.project_id == project_id, res.c.source_id == source_id, res.c.metadata_hash == m_hash))).first() internal_id = res_row[0] if res_row else None if internal_id is None: result = conn.execute(res.insert(), resource_id=res_id, user_id=user_id, project_id=project_id, source_id=source_id, resource_metadata=rmeta, metadata_hash=m_hash) internal_id = result.inserted_primary_key[0] if rmeta and isinstance(rmeta, dict): meta_map = {} for key, v in utils.dict_to_keyval(rmeta): try: _model = 
sql_utils.META_TYPE_MAP[type(v)] if meta_map.get(_model) is None: meta_map[_model] = [] meta_map[_model].append( {'id': internal_id, 'meta_key': key, 'value': v}) except KeyError: LOG.warning(_("Unknown metadata type. Key " "(%s) will not be queryable."), key) for _model in meta_map.keys(): conn.execute(_model.__table__.insert(), meta_map[_model]) except dbexc.DBDuplicateEntry: # retry function to pick up duplicate committed object internal_id = Connection._create_resource( conn, res_id, user_id, project_id, source_id, rmeta) return internal_id @api.wrap_db_retry(retry_interval=cfg.CONF.database.retry_interval, max_retries=cfg.CONF.database.max_retries, retry_on_deadlock=True) def record_metering_data(self, data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ engine = self._engine_facade.get_engine() with engine.begin() as conn: # Record the raw data for the sample. m_id = self._create_meter(conn, data['counter_name'], data['counter_type'], data['counter_unit']) res_id = self._create_resource(conn, data['resource_id'], data['user_id'], data['project_id'], data['source'], data['resource_metadata']) sample = models.Sample.__table__ conn.execute(sample.insert(), meter_id=m_id, resource_id=res_id, timestamp=data['timestamp'], volume=data['counter_volume'], message_signature=data['message_signature'], message_id=data['message_id']) def clear_expired_metering_data(self, ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ # Prevent database deadlocks from occurring by # using separate transaction for each delete session = self._engine_facade.get_session() with session.begin(): end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) sample_q = (session.query(models.Sample) .filter(models.Sample.timestamp < end)) rows = sample_q.delete() LOG.info(_LI("%d samples removed from database"), rows) if not cfg.CONF.sql_expire_samples_only: with session.begin(): # remove Meter definitions with no matching samples (session.query(models.Meter) .filter(~models.Meter.samples.any()) .delete(synchronize_session=False)) with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(~models.Resource.samples.any())) # mark resource with no matching samples for delete resource_q.update({models.Resource.metadata_hash: "delete_" + cast(models.Resource.internal_id, sa.String)}, synchronize_session=False) # remove metadata of resources marked for delete for table in [models.MetaText, models.MetaBigInt, models.MetaFloat, models.MetaBool]: with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(models.Resource.metadata_hash .like('delete_%'))) resource_subq = resource_q.subquery() (session.query(table) .filter(table.id.in_(resource_subq)) .delete(synchronize_session=False)) # remove resource marked for delete with session.begin(): resource_q = (session.query(models.Resource.internal_id) .filter(models.Resource.metadata_hash .like('delete_%'))) resource_q.delete(synchronize_session=False) LOG.info(_LI("Expired residual resource and" " meter definition data")) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of api_models.Resource instances :param user: Optional ID for user that owns the resource. 
:param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return s_filter = storage.SampleFilter(user=user, project=project, source=source, start_timestamp=start_timestamp, start_timestamp_op=start_timestamp_op, end_timestamp=end_timestamp, end_timestamp_op=end_timestamp_op, metaquery=metaquery, resource=resource) session = self._engine_facade.get_session() # get list of resource_ids has_timestamp = start_timestamp or end_timestamp # NOTE: When sql_expire_samples_only is enabled, there will be some # resources without any sample, in such case we should use inner # join on sample table to avoid wrong result. if cfg.CONF.sql_expire_samples_only or has_timestamp: res_q = session.query(distinct(models.Resource.resource_id)).join( models.Sample, models.Sample.resource_id == models.Resource.internal_id) else: res_q = session.query(distinct(models.Resource.resource_id)) res_q = make_query_from_filter(session, res_q, s_filter, require_meter=False) res_q = res_q.limit(limit) if limit else res_q for res_id in res_q.all(): # get max and min sample timestamp value min_max_q = (session.query(func.max(models.Sample.timestamp) .label('max_timestamp'), func.min(models.Sample.timestamp) .label('min_timestamp')) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id) .filter(models.Resource.resource_id == res_id[0])) min_max_q = make_query_from_filter(session, min_max_q, s_filter, require_meter=False) min_max = min_max_q.first() # get resource details for latest sample res_q = (session.query(models.Resource.resource_id, models.Resource.user_id, models.Resource.project_id, models.Resource.source_id, models.Resource.resource_metadata) .join(models.Sample, models.Sample.resource_id == models.Resource.internal_id) .filter(models.Sample.timestamp == min_max.max_timestamp) .filter(models.Resource.resource_id == res_id[0]) .order_by(models.Sample.id.desc()).limit(1)) res = res_q.first() yield api_models.Resource( resource_id=res.resource_id, project_id=res.project_id, first_sample_timestamp=min_max.min_timestamp, last_sample_timestamp=min_max.max_timestamp, source=res.source_id, user_id=res.user_id, metadata=res.resource_metadata ) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of api_models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional ID of the resource. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return s_filter = storage.SampleFilter(user=user, project=project, source=source, metaquery=metaquery, resource=resource) # NOTE(gordc): get latest sample of each meter/resource. we do not # filter here as we want to filter only on latest record. 
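        # NOTE (added commentary): roughly the SQL the subquery below
        # produces (a sketch; the exact form is dialect-dependent):
        #     SELECT max(sample.id) FROM sample
        #       JOIN resource ON resource.internal_id = sample.resource_id
        #      GROUP BY sample.meter_id[, resource.resource_id]
        # i.e. one representative (latest) sample per meter when unique is
        # set, or per meter/resource pair otherwise.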
session = self._engine_facade.get_session() subq = session.query(func.max(models.Sample.id).label('id')).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id) if unique: subq = subq.group_by(models.Sample.meter_id) else: subq = subq.group_by(models.Sample.meter_id, models.Resource.resource_id) if resource: subq = subq.filter(models.Resource.resource_id == resource) subq = subq.subquery() # get meter details for samples. query_sample = (session.query(models.Sample.meter_id, models.Meter.name, models.Meter.type, models.Meter.unit, models.Resource.resource_id, models.Resource.project_id, models.Resource.source_id, models.Resource.user_id).join( subq, subq.c.id == models.Sample.id) .join(models.Meter, models.Meter.id == models.Sample.meter_id) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id)) query_sample = make_query_from_filter(session, query_sample, s_filter, require_meter=False) query_sample = query_sample.limit(limit) if limit else query_sample if unique: for row in query_sample.all(): yield api_models.Meter( name=row.name, type=row.type, unit=row.unit, resource_id=None, project_id=None, source=None, user_id=None) else: for row in query_sample.all(): yield api_models.Meter( name=row.name, type=row.type, unit=row.unit, resource_id=row.resource_id, project_id=row.project_id, source=row.source_id, user_id=row.user_id) @staticmethod def _retrieve_samples(query): samples = query.all() for s in samples: # Remove the id generated by the database when # the sample was inserted. It is an implementation # detail that should not leak outside of the driver. yield api_models.Sample( source=s.source_id, counter_name=s.counter_name, counter_type=s.counter_type, counter_unit=s.counter_unit, counter_volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, recorded_at=s.recorded_at, resource_metadata=s.resource_metadata, message_id=s.message_id, message_signature=s.message_signature, ) def get_samples(self, sample_filter, limit=None): """Return an iterable of api_models.Samples. :param sample_filter: Filter. :param limit: Maximum number of results to return. 
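        A usage sketch (illustrative only; the meter name, timestamp and
        connection object are hypothetical)::

            f = storage.SampleFilter(meter='cpu_util',
                                     start_timestamp=start)
            for s in conn.get_samples(f, limit=10):
                print(s.counter_volume, s.timestamp)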
""" if limit == 0: return [] session = self._engine_facade.get_session() query = session.query(models.Sample.timestamp, models.Sample.recorded_at, models.Sample.message_id, models.Sample.message_signature, models.Sample.volume.label('counter_volume'), models.Meter.name.label('counter_name'), models.Meter.type.label('counter_type'), models.Meter.unit.label('counter_unit'), models.Resource.source_id, models.Resource.user_id, models.Resource.project_id, models.Resource.resource_metadata, models.Resource.resource_id).join( models.Meter, models.Meter.id == models.Sample.meter_id).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id).order_by( models.Sample.timestamp.desc()) query = make_query_from_filter(session, query, sample_filter, require_meter=False) if limit: query = query.limit(limit) return self._retrieve_samples(query) def query_samples(self, filter_expr=None, orderby=None, limit=None): if limit == 0: return [] session = self._engine_facade.get_session() engine = self._engine_facade.get_engine() query = session.query(models.Sample.timestamp, models.Sample.recorded_at, models.Sample.message_id, models.Sample.message_signature, models.Sample.volume.label('counter_volume'), models.Meter.name.label('counter_name'), models.Meter.type.label('counter_type'), models.Meter.unit.label('counter_unit'), models.Resource.source_id, models.Resource.user_id, models.Resource.project_id, models.Resource.resource_metadata, models.Resource.resource_id).join( models.Meter, models.Meter.id == models.Sample.meter_id).join( models.Resource, models.Resource.internal_id == models.Sample.resource_id) transformer = sql_utils.QueryTransformer(models.FullSample, query, dialect=engine.dialect.name) if filter_expr is not None: transformer.apply_filter(filter_expr) transformer.apply_options(orderby, limit) return self._retrieve_samples(transformer.get_query()) @staticmethod def _get_aggregate_functions(aggregate): if not aggregate: return [f for f in STANDARD_AGGREGATES.values()] functions = [] for a in aggregate: if a.func in STANDARD_AGGREGATES: functions.append(STANDARD_AGGREGATES[a.func]) elif a.func in UNPARAMETERIZED_AGGREGATES: functions.append(UNPARAMETERIZED_AGGREGATES[a.func]) elif a.func in PARAMETERIZED_AGGREGATES['compute']: validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func) if not (validate and validate(a.param)): raise storage.StorageBadAggregate('Bad aggregate: %s.%s' % (a.func, a.param)) compute = PARAMETERIZED_AGGREGATES['compute'][a.func] functions.append(compute(a.param)) else: raise ceilometer.NotImplementedError( 'Selectable aggregate function %s' ' is not supported' % a.func) return functions def _make_stats_query(self, sample_filter, groupby, aggregate): select = [ func.min(models.Sample.timestamp).label('tsmin'), func.max(models.Sample.timestamp).label('tsmax'), models.Meter.unit ] select.extend(self._get_aggregate_functions(aggregate)) session = self._engine_facade.get_session() if groupby: group_attributes = [] for g in groupby: if g != 'resource_metadata.instance_type': group_attributes.append(getattr(models.Resource, g)) else: group_attributes.append( getattr(models.MetaText, 'value') .label('resource_metadata.instance_type')) select.extend(group_attributes) query = ( session.query(*select) .join(models.Meter, models.Meter.id == models.Sample.meter_id) .join(models.Resource, models.Resource.internal_id == models.Sample.resource_id) .group_by(models.Meter.unit)) if groupby: for g in groupby: if g == 'resource_metadata.instance_type': query = query.join( 
    def _make_stats_query(self, sample_filter, groupby, aggregate):

        select = [
            func.min(models.Sample.timestamp).label('tsmin'),
            func.max(models.Sample.timestamp).label('tsmax'),
            models.Meter.unit
        ]
        select.extend(self._get_aggregate_functions(aggregate))

        session = self._engine_facade.get_session()

        if groupby:
            group_attributes = []
            for g in groupby:
                if g != 'resource_metadata.instance_type':
                    group_attributes.append(getattr(models.Resource, g))
                else:
                    group_attributes.append(
                        getattr(models.MetaText, 'value')
                        .label('resource_metadata.instance_type'))
            select.extend(group_attributes)

        query = (
            session.query(*select)
            .join(models.Meter,
                  models.Meter.id == models.Sample.meter_id)
            .join(models.Resource,
                  models.Resource.internal_id == models.Sample.resource_id)
            .group_by(models.Meter.unit))

        if groupby:
            for g in groupby:
                if g == 'resource_metadata.instance_type':
                    query = query.join(
                        models.MetaText,
                        models.Resource.internal_id == models.MetaText.id)
                    query = query.filter(
                        models.MetaText.meta_key == 'instance_type')
            query = query.group_by(*group_attributes)

        return make_query_from_filter(session, query, sample_filter)

    @staticmethod
    def _stats_result_aggregates(result, aggregate):
        stats_args = {}
        if isinstance(result.count, six.integer_types):
            stats_args['count'] = result.count
        for attr in ['min', 'max', 'sum', 'avg']:
            if hasattr(result, attr):
                stats_args[attr] = getattr(result, attr)
        if aggregate:
            stats_args['aggregate'] = {}
            for a in aggregate:
                key = '%s%s' % (a.func, '/%s' % a.param if a.param else '')
                stats_args['aggregate'][key] = getattr(result, key)
        return stats_args

    @staticmethod
    def _stats_result_to_model(result, period, period_start,
                               period_end, groupby, aggregate):
        stats_args = Connection._stats_result_aggregates(result, aggregate)
        stats_args['unit'] = result.unit
        duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
                    if result.tsmin is not None and result.tsmax is not None
                    else None)
        stats_args['duration'] = duration
        stats_args['duration_start'] = result.tsmin
        stats_args['duration_end'] = result.tsmax
        stats_args['period'] = period
        stats_args['period_start'] = period_start
        stats_args['period_end'] = period_end
        stats_args['groupby'] = (dict(
            (g, getattr(result, g)) for g in groupby) if groupby else None)
        return api_models.Statistics(**stats_args)

    def get_meter_statistics(self, sample_filter, period=None,
                             groupby=None, aggregate=None):
        """Return an iterable of api_models.Statistics instances.

        Items contain meter statistics described by the query parameters.
        The filter must have a meter value set.
        """
        if groupby:
            for group in groupby:
                if group not in ['user_id', 'project_id', 'resource_id',
                                 'resource_metadata.instance_type']:
                    raise ceilometer.NotImplementedError('Unable to group by '
                                                         'these fields')

        if not period:
            for res in self._make_stats_query(sample_filter,
                                              groupby,
                                              aggregate):
                if res.count:
                    yield self._stats_result_to_model(res, 0,
                                                      res.tsmin, res.tsmax,
                                                      groupby,
                                                      aggregate)
            return

        if not (sample_filter.start_timestamp and
                sample_filter.end_timestamp):
            res = self._make_stats_query(sample_filter,
                                         None,
                                         aggregate).first()
            if not res:
                # NOTE(liusheng): 'res' may be None, because no sample
                # has been found with the given sample filter(s).
                return

        query = self._make_stats_query(sample_filter, groupby, aggregate)
        # HACK(jd) This is an awful method to compute stats by period, but
        # since we're trying to be SQL agnostic we have to write portable
        # code, so here it is, admire! We're going to do one request to get
        # stats by period. We would like to use GROUP BY, but there's no
        # portable way to manipulate timestamp in SQL, so we can't.
        for period_start, period_end in base.iter_period(
                sample_filter.start_timestamp or res.tsmin,
                sample_filter.end_timestamp or res.tsmax,
                period):
            q = query.filter(models.Sample.timestamp >= period_start)
            q = q.filter(models.Sample.timestamp < period_end)
            for r in q.all():
                if r.count:
                    yield self._stats_result_to_model(
                        result=r,
                        period=int(timeutils.delta_seconds(period_start,
                                                           period_end)),
                        period_start=period_start,
                        period_end=period_end,
                        groupby=groupby,
                        aggregate=aggregate
                    )
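# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. The loop above leans on base.iter_period() to slice [start, end)
# into fixed-size windows in Python, because there is no portable SQL way
# to bucket timestamps. A minimal stand-in with the same contract (yield
# consecutive (period_start, period_end) pairs) could look like this:
import datetime

def iter_period(start, end, period):
    """Yield (period_start, period_end) windows of `period` seconds."""
    period_start = start
    increment = datetime.timedelta(seconds=period)
    while period_start < end:
        next_start = period_start + increment
        yield period_start, next_start
        period_start = next_start

begin = datetime.datetime(2016, 1, 1, 0, 0, 0)
finish = datetime.datetime(2016, 1, 1, 0, 3, 0)
for ps, pe in iter_period(begin, finish, 60):
    print(ps, '->', pe)   # three one-minute windows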
ceilometer-6.0.0/ceilometer/storage/__init__.py

#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend management
"""

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log
import retrying
import six
import six.moves.urllib.parse as urlparse
from stevedore import driver

from ceilometer import utils

LOG = log.getLogger(__name__)

OLD_OPTS = [
    cfg.StrOpt('database_connection',
               secret=True,
               help='DEPRECATED - Database connection string.',
               ),
]

cfg.CONF.register_opts(OLD_OPTS)

OPTS = [
    cfg.IntOpt('metering_time_to_live',
               default=-1,
               help="Number of seconds that samples are kept "
                    "in the database for (<= 0 means forever).",
               deprecated_opts=[cfg.DeprecatedOpt('time_to_live',
                                                  'database')]),
    cfg.IntOpt('event_time_to_live',
               default=-1,
               help=("Number of seconds that events are kept "
                     "in the database for (<= 0 means forever).")),
    cfg.StrOpt('metering_connection',
               secret=True,
               help='The connection string used to connect to the metering '
                    'database. (if unset, connection is used)'),
    cfg.StrOpt('event_connection',
               secret=True,
               help='The connection string used to connect to the event '
                    'database. (if unset, connection is used)'),
    cfg.IntOpt('db2nosql_resource_id_maxlen',
               default=512,
               help="The max length of resources id in DB2 nosql, "
                    "the value should be larger than len(hostname) * 2 "
                    "as compute node's resource id is <hostname>_<nodename>."),
]

cfg.CONF.register_opts(OPTS, group='database')

CLI_OPTS = [
    cfg.BoolOpt('sql-expire-samples-only',
                default=False,
                help="Indicates if expirer expires only samples. If set true,"
                     " expired samples will be deleted, but residual"
                     " resource and meter definition data will remain.",
                ),
]

cfg.CONF.register_cli_opts(CLI_OPTS)

db_options.set_defaults(cfg.CONF)


class StorageUnknownWriteError(Exception):
    """Error raised when an unknown error occurs while recording."""


class StorageBadVersion(Exception):
    """Error raised when the storage backend version is not good enough."""


class StorageBadAggregate(Exception):
    """Error raised when an aggregate is unacceptable to storage backend."""
    code = 400


def get_connection_from_config(conf, purpose='metering'):
    retries = conf.database.max_retries

    # Convert retry_interval secs to msecs for retry decorator
    @retrying.retry(wait_fixed=conf.database.retry_interval * 1000,
                    stop_max_attempt_number=retries if retries >= 0 else None)
    def _inner():
        if conf.database_connection:
            conf.set_override('connection', conf.database_connection,
                              group='database')
        namespace = 'ceilometer.%s.storage' % purpose
        url = (getattr(conf.database, '%s_connection' % purpose) or
               conf.database.connection)
        return get_connection(url, namespace)

    return _inner()
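# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. get_connection_from_config() wraps the actual connection attempt
# in the `retrying` library's decorator: wait_fixed is in milliseconds
# (hence the "* 1000"), and stop_max_attempt_number=None retries forever.
# The same mechanics in isolation:
import retrying

attempts = {'n': 0}

@retrying.retry(wait_fixed=100,             # 100 ms between attempts
                stop_max_attempt_number=3)  # give up after 3 tries
def flaky_connect():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise IOError('backend not ready yet')
    return 'connected'

print(flaky_connect())   # succeeds on the third attempt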
def get_connection(url, namespace):
    """Return an open connection to the database."""
    connection_scheme = urlparse.urlparse(url).scheme
    # SqlAlchemy connections may specify a 'dialect' or
    # 'dialect+driver'. Handle the case where driver is specified.
    engine_name = connection_scheme.split('+')[0]

    if engine_name == 'db2':
        import warnings
        warnings.simplefilter("always")
        import debtcollector
        debtcollector.deprecate("The DB2nosql driver is no longer supported",
                                version="Liberty", removal_version="N*-cycle")
    # NOTE: translation not applied bug #1446983
    LOG.debug('looking for %(name)r driver in %(namespace)r',
              {'name': engine_name, 'namespace': namespace})
    mgr = driver.DriverManager(namespace, engine_name)
    return mgr.driver(url)


class SampleFilter(object):
    """Holds the properties for building a query from a meter/sample filter.

    :param user: The sample owner.
    :param project: The sample project.
    :param start_timestamp: Earliest time point in the request.
    :param start_timestamp_op: Earliest timestamp operation in the request.
    :param end_timestamp: Latest time point in the request.
    :param end_timestamp_op: Latest timestamp operation in the request.
    :param resource: Optional filter for resource id.
    :param meter: Optional filter for meter type using the meter name.
    :param source: Optional source filter.
    :param message_id: Optional sample_id filter.
    :param metaquery: Optional filter on the metadata
    """
    def __init__(self, user=None, project=None,
                 start_timestamp=None, start_timestamp_op=None,
                 end_timestamp=None, end_timestamp_op=None,
                 resource=None, meter=None,
                 source=None, message_id=None,
                 metaquery=None):
        self.user = user
        self.project = project
        self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
        self.start_timestamp_op = start_timestamp_op
        self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
        self.end_timestamp_op = end_timestamp_op
        self.resource = resource
        self.meter = meter
        self.source = source
        self.metaquery = metaquery or {}
        self.message_id = message_id

    def __repr__(self):
        return ("<SampleFilter(user: %s,"
                " project: %s,"
                " start_timestamp: %s,"
                " start_timestamp_op: %s,"
                " end_timestamp: %s,"
                " end_timestamp_op: %s,"
                " resource: %s,"
                " meter: %s,"
                " source: %s,"
                " metaquery: %s,"
                " message_id: %s)>" %
                (self.user,
                 self.project,
                 self.start_timestamp,
                 self.start_timestamp_op,
                 self.end_timestamp,
                 self.end_timestamp_op,
                 self.resource,
                 self.meter,
                 self.source,
                 self.metaquery,
                 self.message_id))


class EventFilter(object):
    """Properties for building an Event query.

    :param start_timestamp: UTC start datetime (mandatory)
    :param end_timestamp: UTC end datetime (mandatory)
    :param event_type: the name of the event. None for all.
    :param message_id: the message_id of the event. None for all.
    :param admin_proj: the project_id of admin role. None if non-admin user.
    :param traits_filter: the trait filter dicts, all of which are optional.
      This parameter is a list of dictionaries that specify trait values:

    .. code-block:: python

        {'key': <key>,
         'string': <value>,
         'integer': <value>,
         'datetime': <value>,
         'float': <value>,
         'op': <eq, lt, le, ne, gt or ge>}
    """

    def __init__(self, start_timestamp=None, end_timestamp=None,
                 event_type=None, message_id=None, traits_filter=None,
                 admin_proj=None):
        self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
        self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
        self.message_id = message_id
        self.event_type = event_type
        self.traits_filter = traits_filter or []
        self.admin_proj = admin_proj

    def __repr__(self):
        return ("<EventFilter(start_timestamp: %s,"
                " end_timestamp: %s,"
                " event_type: %s,"
                " traits: %s)>" %
                (self.start_timestamp,
                 self.end_timestamp,
                 self.event_type,
                 six.text_type(self.traits_filter)))
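# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. A SampleFilter is a plain value object: the API layer builds one
# from query parameters and hands it to the storage driver, which turns it
# into SQL (or the equivalent for other backends). Constructing one
# directly could look like this; the IDs below are invented:
from ceilometer import storage

s_filter = storage.SampleFilter(
    meter='cpu_util',
    project='project-a',                    # project scoping
    start_timestamp='2016-01-01T00:00:00',  # sanitized to a datetime
    start_timestamp_op='ge',
)
print(s_filter)   # the __repr__ above shows every field for debugging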
ceilometer-6.0.0/ceilometer/storage/impl_log.py

#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simple logging storage backend.
"""

from oslo_log import log

from ceilometer.i18n import _LI
from ceilometer.storage import base

LOG = log.getLogger(__name__)


class Connection(base.Connection):
    """Log the data."""

    def upgrade(self):
        pass

    def clear(self):
        pass

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: '
                     '%(counter_volume)s')
                 % ({'counter_name': data['counter_name'],
                     'resource_id': data['resource_id'],
                     'counter_volume': data['counter_volume']}))

    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_LI("Dropping metering data with TTL %d"), ttl)

    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, limit=None):
        """Return an iterable of dictionaries containing resource information.

        {'resource_id': UUID of the resource,
         'project_id': UUID of project owning the resource,
         'user_id': UUID of user owning the resource,
         'timestamp': UTC datetime of last update to the resource,
         'metadata': most current metadata for the resource,
         'meter': list of the meters reporting data for the resource,
         }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param limit: Maximum number of results to return.
        """
        return []

    def get_meters(self, user=None, project=None, resource=None, source=None,
                   limit=None, metaquery=None, unique=False):
        """Return an iterable of dictionaries containing meter information.

        {'name': name of the meter,
         'type': type of the meter (gauge, delta, cumulative),
         'resource_id': UUID of the resource,
         'project_id': UUID of project owning the resource,
         'user_id': UUID of user owning the resource,
         }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param limit: Maximum number of results to return.
        :param metaquery: Optional dict with metadata to match on.
        :param unique: If set to true, return only unique meter information.
        """
        return []
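# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. This driver is a "null object" backend: every write is logged and
# every read returns an empty result, which makes it handy for smoke-testing
# the pipeline without a real database. Assuming the usual base.Connection
# signature (a single connection URL) and the 'log://' entry-point
# registration for this class, it can be exercised directly:
from ceilometer.storage import impl_log

conn = impl_log.Connection('log://')   # assumption: base.Connection(url)
conn.record_metering_data({'counter_name': 'cpu_util',
                           'resource_id': 'resource-a',
                           'counter_volume': 0.5})
print(list(conn.get_meters()))         # -> [] (nothing is ever stored)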
""" return [] def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return a dictionary containing meter statistics. Meter statistics is described by the query parameters. The filter must have a meter value set. { 'min': 'max': 'avg': 'sum': 'count': 'period': 'period_start': 'period_end': 'duration': 'duration_start': 'duration_end': } """ return [] ceilometer-6.0.0/ceilometer/storage/sqlalchemy/0000775000567000056710000000000012701406364022747 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/0000775000567000056710000000000012701406364025424 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/README0000664000567000056710000000021712701406223026276 0ustar jenkinsjenkins00000000000000sqlalchemy-migrate is DEPRECATED. All new migrations should be written using alembic. Please see ceilometer/storage/sqlalchemy/alembic/README ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/0000775000567000056710000000000012701406364027274 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_i0000664000567000056710000000154012701406223035254 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa # Add index on metadata_hash column of resource def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) resource = sa.Table('resource', meta, autoload=True) index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash) index.create(bind=migrate_engine) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_type0000664000567000056710000000313612701406223035365 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    resource = Table('resource', meta, autoload=True)
    resource.c.user_id.alter(type=String(255))
    resource.c.project_id.alter(type=String(255))
    resource.c.resource_id.alter(type=String(255))
    resource.c.source_id.alter(type=String(255))
    sample = Table('sample', meta, autoload=True)
    sample.c.message_signature.alter(type=String(64))
    sample.c.message_id.alter(type=String(128))
    alarm = Table('alarm', meta, autoload=True)
    alarm.c.alarm_id.alter(type=String(128))
    alarm.c.user_id.alter(type=String(255))
    alarm.c.project_id.alter(type=String(255))
    alarm_history = Table('alarm_history', meta, autoload=True)
    alarm_history.c.alarm_id.alter(type=String(128))
    alarm_history.c.user_id.alter(type=String(255))
    alarm_history.c.project_id.alter(type=String(255))
    alarm_history.c.event_id.alter(type=String(128))
    alarm_history.c.on_behalf_of.alter(type=String(255))
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py

#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import migrate
import sqlalchemy as sa


def get_alembic_version(meta):
    """Return Alembic version or None if no Alembic table exists."""
    try:
        a_ver = sa.Table('alembic_version', meta, autoload=True)
        return sa.select([a_ver.c.version_num]).scalar()
    except sa.exc.NoSuchTableError:
        return None


def delete_alembic(meta):
    try:
        sa.Table('alembic_version', meta,
                 autoload=True).drop(checkfirst=True)
    except sa.exc.NoSuchTableError:
        pass


INDEXES = (
    # ([dialects], table_name, index_name, columns,
    #  create(True)/delete(False), unique, limited-length)
    (['mysql', 'sqlite', 'postgresql'],
     'resource',
     'resource_user_id_project_id_key',
     ('user_id', 'project_id'),
     True, False, True),
    (['mysql'], 'source', 'id', ('id',), False, True, False))


def index_cleanup(meta, table_name, uniq_name, columns, create, unique,
                  limited):
    table = sa.Table(table_name, meta, autoload=True)
    if create:
        if limited and meta.bind.engine.name == 'mysql':
            # For some versions of mysql we can get an error
            # "Specified key was too long; max key length is 1000 bytes".
            # We should create an index by hand in this case with limited
            # length of columns.
            columns_mysql = ",".join((c + "(100)" for c in columns))
            sql = ("create index %s ON %s (%s)" % (uniq_name, table,
                                                   columns_mysql))
            meta.bind.engine.execute(sql)
        else:
            cols = [table.c[col] for col in columns]
            sa.Index(uniq_name, *cols, unique=unique).create()
    else:
        if unique:
            migrate.UniqueConstraint(*columns, table=table,
                                     name=uniq_name).drop()
        else:
            cols = [table.c[col] for col in columns]
            sa.Index(uniq_name, *cols).drop()


def change_uniq(meta):
    uniq_name = 'uniq_sourceassoc0meter_id0user_id'
    columns = ('meter_id', 'user_id')

    if meta.bind.engine.name == 'sqlite':
        return

    sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
    meter = sa.Table('meter', meta, autoload=True)
    user = sa.Table('user', meta, autoload=True)
    if meta.bind.engine.name == 'mysql':
        # For mysql dialect all dependent FK should be removed
        # before renaming of constraint.
        params = {'columns': [sourceassoc.c.meter_id],
                  'refcolumns': [meter.c.id],
                  'name': 'fk_sourceassoc_meter_id'}
        migrate.ForeignKeyConstraint(**params).drop()
        params = {'columns': [sourceassoc.c.user_id],
                  'refcolumns': [user.c.id],
                  'name': 'fk_sourceassoc_user_id'}
        migrate.ForeignKeyConstraint(**params).drop()
    migrate.UniqueConstraint(*columns, table=sourceassoc,
                             name=uniq_name).create()
    if meta.bind.engine.name == 'mysql':
        params = {'columns': [sourceassoc.c.meter_id],
                  'refcolumns': [meter.c.id],
                  'name': 'fk_sourceassoc_meter_id'}
        migrate.ForeignKeyConstraint(**params).create()
        params = {'columns': [sourceassoc.c.user_id],
                  'refcolumns': [user.c.id],
                  'name': 'fk_sourceassoc_user_id'}
        migrate.ForeignKeyConstraint(**params).create()


def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    a_ver = get_alembic_version(meta)

    if not a_ver:
        alarm = sa.Table('alarm', meta, autoload=True)
        repeat_act = sa.Column('repeat_actions', sa.Boolean,
                               server_default=sa.sql.expression.false())
        alarm.create_column(repeat_act)
        a_ver = '43b1a023dfaa'

    if a_ver == '43b1a023dfaa':
        meter = sa.Table('meter', meta, autoload=True)
        meter.c.resource_metadata.alter(type=sa.Text)
        a_ver = '17738166b91'

    if a_ver == '17738166b91':
        for (engine_names, table_name, uniq_name, columns, create, uniq,
             limited) in INDEXES:
            if migrate_engine.name in engine_names:
                index_cleanup(meta, table_name, uniq_name, columns,
                              create, uniq, limited)
        a_ver = 'b6ae66d05e3'

    if a_ver == 'b6ae66d05e3':
        change_uniq(meta)

    delete_alembic(meta)
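# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. The upgrade() above folds several old alembic revisions into a
# single sqlalchemy-migrate script: it reads the stored revision, then
# "falls through" each step while updating the marker, so a database
# stopped at any intermediate revision is carried the rest of the way.
# The control flow, stripped of the schema work (step markers invented):
def fold_upgrades(rev):
    applied = []
    if not rev:
        applied.append('add repeat_actions column')
        rev = 'step1'
    if rev == 'step1':
        applied.append('alter meter.resource_metadata to Text')
        rev = 'step2'
    if rev == 'step2':
        applied.append('index cleanup')
        rev = 'step3'
    if rev == 'step3':
        applied.append('rename unique constraint')
    return applied

print(fold_upgrades(None))      # all four steps run
print(fold_upgrades('step2'))   # only the last two steps run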
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate import ForeignKeyConstraint
import sqlalchemy as sa

from ceilometer.storage.sqlalchemy import migration

TABLES = ['sample', 'resource', 'source', 'sourceassoc']
DROP_TABLES = ['resource', 'source', 'sourceassoc']

INDEXES = {
    "sample": (('resource_id', 'resource', 'id'),),
    "sourceassoc": (('sample_id', 'sample', 'id'),
                    ('resource_id', 'resource', 'id'),
                    ('source_id', 'source', 'id'))
}


def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    load_tables = dict((table_name, sa.Table(table_name, meta,
                                             autoload=True))
                       for table_name in TABLES)

    # drop foreign keys
    if migrate_engine.name != 'sqlite':
        for table_name, indexes in INDEXES.items():
            table = load_tables[table_name]
            for column, ref_table_name, ref_column_name in indexes:
                ref_table = load_tables[ref_table_name]
                params = {'columns': [table.c[column]],
                          'refcolumns': [ref_table.c[ref_column_name]]}
                fk_table_name = table_name
                if migrate_engine.name == "mysql":
                    params['name'] = "_".join(('fk', fk_table_name, column))
                elif (migrate_engine.name == "postgresql" and
                      table_name == 'sample'):
                    # fk was not renamed in script 030
                    params['name'] = "_".join(('meter', column, 'fkey'))
                fkey = ForeignKeyConstraint(**params)
                fkey.drop()

    # create source field in sample
    sample = load_tables['sample']
    sample.create_column(sa.Column('source_id', sa.String(255)))

    # move source values to samples
    sourceassoc = load_tables['sourceassoc']
    query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]).
             where(sourceassoc.c.sample_id.isnot(None)))
    for sample_id, source_id in migration.paged(query):
        (sample.update().where(sample_id == sample.c.id).
         values({'source_id': source_id}).execute())

    # drop tables
    for table_name in DROP_TABLES:
        sa.Table(table_name, meta, autoload=True).drop()

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    alarm = Table('alarm', meta, autoload=True)
    severity = Column('severity', String(50))
    alarm.create_column(severity)
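# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. Migration 036 above moves row data through migration.paged() so
# a huge sourceassoc table is never loaded into memory at once. A stand-in
# helper with the same contract (iterate a bound SELECT in fixed-size
# chunks); the real helper lives in
# ceilometer/storage/sqlalchemy/migration.py and may differ in detail:
def paged(query, size=1000):
    """Yield rows from `query`, fetching at most `size` rows at a time.

    Assumes `query` is a SQLAlchemy select bound to an engine, so
    .execute() can be called on it directly.
    """
    offset = 0
    while True:
        rows = query.offset(offset).limit(size).execute().fetchall()
        if not rows:
            return
        for row in rows:
            yield row
        offset += size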
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa

from ceilometer.storage.sqlalchemy import models

tables = [('trait_text', sa.String(255), True, 't_string', 1),
          ('trait_int', sa.Integer, False, 't_int', 2),
          ('trait_float', sa.Float(53), False, 't_float', 3),
          ('trait_datetime', models.PreciseTimestamp(), False,
           't_datetime', 4)]


def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    trait = sa.Table('trait', meta, autoload=True)
    event = sa.Table('event', meta, autoload=True)
    trait_type = sa.Table('trait_type', meta, autoload=True)
    for t_name, t_type, t_nullable, col_name, __ in tables:
        t_table = sa.Table(
            t_name, meta,
            sa.Column('event_id', sa.Integer,
                      sa.ForeignKey(event.c.id), primary_key=True),
            sa.Column('key', sa.String(255), primary_key=True),
            sa.Column('value', t_type, nullable=t_nullable),
            sa.Index('ix_%s_event_id_key' % t_name,
                     'event_id', 'key'),
            mysql_engine='InnoDB',
            mysql_charset='utf8',
        )
        t_table.create()

        query = sa.select(
            [trait.c.event_id, trait_type.c.desc,
             trait.c[col_name]]).select_from(
                 trait.join(trait_type,
                            trait.c.trait_type_id ==
                            trait_type.c.id)).where(
                                trait.c[col_name] != sa.null())
        if query.alias().select().scalar() is not None:
            t_table.insert().from_select(
                ['event_id', 'key', 'value'], query).execute()
    trait.drop()
    trait_type.drop()

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    meter = Table('meter', meta, autoload=True)
    unit = Column('counter_unit', String(255))
    meter.create_column(unit)
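# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. Migration 041 copies traits into the new per-type tables entirely
# inside the database with insert().from_select(), avoiding a Python
# round-trip per row. The same idiom on a toy schema (names invented):
import sqlalchemy as sa

metadata = sa.MetaData()
src = sa.Table('src', metadata,
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('value', sa.String(32)))
dst = sa.Table('dst', metadata,
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('value', sa.String(32)))

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)
conn = engine.connect()
conn.execute(src.insert(), [{'value': 'a'}, {'value': 'b'}])

# INSERT INTO dst (id, value) SELECT id, value FROM src WHERE value != 'b'
query = sa.select([src.c.id, src.c.value]).where(src.c.value != 'b')
conn.execute(dst.insert().from_select(['id', 'value'], query))
print(conn.execute(sa.select([dst])).fetchall())   # [(1, 'a')]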
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    meter = Table(
        'meter', meta,
        Column('id', Integer, primary_key=True, index=True),
        Column('counter_name', String(255)),
        Column('user_id', String(255), index=True),
        Column('project_id', String(255), index=True),
        Column('resource_id', String(255)),
        Column('resource_metadata', String(5000)),
        Column('counter_type', String(255)),
        Column('counter_volume', Integer),
        Column('counter_duration', Integer),
        Column('timestamp', DateTime(timezone=False), index=True),
        Column('message_signature', String(1000)),
        Column('message_id', String(1000)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    resource = Table(
        'resource', meta,
        Column('id', String(255), primary_key=True, index=True),
        Column('resource_metadata', String(5000)),
        Column('project_id', String(255), index=True),
        Column('received_timestamp', DateTime(timezone=False)),
        Column('timestamp', DateTime(timezone=False), index=True),
        Column('user_id', String(255), index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    user = Table(
        'user', meta,
        Column('id', String(255), primary_key=True, index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    project = Table(
        'project', meta,
        Column('id', String(255), primary_key=True, index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    sourceassoc = Table(
        'sourceassoc', meta,
        Column('source_id', String(255), index=True),
        Column('user_id', String(255)),
        Column('project_id', String(255)),
        Column('resource_id', String(255)),
        Column('meter_id', Integer),
        Index('idx_su', 'source_id', 'user_id'),
        Index('idx_sp', 'source_id', 'project_id'),
        Index('idx_sr', 'source_id', 'resource_id'),
        Index('idx_sm', 'source_id', 'meter_id'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    source = Table(
        'source', meta,
        Column('id', String(255), primary_key=True, index=True),
        UniqueConstraint('id'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    tables = [meter, project, resource, user, source, sourceassoc]
    for i in sorted(tables, key=lambda table: table.fullname):
        i.create()

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql

ALTER TABLE trait RENAME TO trait_orig;

CREATE TABLE trait_type (
  id INTEGER PRIMARY KEY ASC,
  'desc' STRING NOT NULL,
  data_type INTEGER NOT NULL,
  UNIQUE ('desc', data_type)
);

INSERT INTO trait_type
SELECT un.id, un.key, t.t_type
FROM unique_name un
JOIN trait_orig t ON un.id = t.name_id
GROUP BY un.id;

CREATE TABLE trait (
  id INTEGER PRIMARY KEY ASC,
  t_string VARCHAR(255),
  t_int INTEGER,
  t_float FLOAT,
  t_datetime FLOAT,
  trait_type_id INTEGER NOT NULL,
  event_id INTEGER NOT NULL,
  FOREIGN KEY (trait_type_id) REFERENCES trait_type (id)
  FOREIGN KEY (event_id) REFERENCES event (id)
);

INSERT INTO trait
SELECT t.id, t.t_string, t.t_int, t.t_float,
       t.t_datetime, t.name_id, t.event_id
FROM trait_orig t;

DROP TABLE trait_orig;
DROP TABLE unique_name;
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import migrate
import sqlalchemy as sa


def _handle_meter_indices(meta):
    if meta.bind.engine.name == 'sqlite':
        return

    resource = sa.Table('resource', meta, autoload=True)
    project = sa.Table('project', meta, autoload=True)
    user = sa.Table('user', meta, autoload=True)
    meter = sa.Table('meter', meta, autoload=True)

    indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp),
                sa.Index('ix_sample_timestamp', meter.c.timestamp)),
               (sa.Index('ix_meter_user_id', meter.c.user_id),
                sa.Index('ix_sample_user_id', meter.c.user_id)),
               (sa.Index('ix_meter_project_id', meter.c.project_id),
                sa.Index('ix_sample_project_id', meter.c.project_id)),
               (sa.Index('idx_meter_rid_cname', meter.c.resource_id,
                         meter.c.counter_name),
                sa.Index('idx_sample_rid_cname', meter.c.resource_id,
                         meter.c.counter_name))]

    fk_params = [({'columns': [meter.c.resource_id],
                   'refcolumns': [resource.c.id]},
                  'fk_meter_resource_id', 'fk_sample_resource_id'),
                 ({'columns': [meter.c.project_id],
                   'refcolumns': [project.c.id]},
                  'fk_meter_project_id', 'fk_sample_project_id'),
                 ({'columns': [meter.c.user_id],
                   'refcolumns': [user.c.id]},
                  'fk_meter_user_id', 'fk_sample_user_id')]

    for fk in fk_params:
        params = fk[0]
        if meta.bind.engine.name == 'mysql':
            params['name'] = fk[1]
        migrate.ForeignKeyConstraint(**params).drop()

    for meter_ix, sample_ix in indices:
        meter_ix.drop()
        sample_ix.create()

    for fk in fk_params:
        params = fk[0]
        if meta.bind.engine.name == 'mysql':
            params['name'] = fk[2]
        migrate.ForeignKeyConstraint(**params).create()


def _alter_sourceassoc(meta, t_name, ix_name, post_action=False):
    if meta.bind.engine.name == 'sqlite':
        return

    sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
    table = sa.Table(t_name, meta, autoload=True)
    user = sa.Table('user', meta, autoload=True)

    c_name = '%s_id' % t_name
    col = getattr(sourceassoc.c, c_name)
    uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name
    uniq_cols = (c_name, 'user_id')
    param = {'columns': [col], 'refcolumns': [table.c.id]}
    user_param = {'columns': [sourceassoc.c.user_id],
                  'refcolumns': [user.c.id]}
    if meta.bind.engine.name == 'mysql':
        param['name'] = 'fk_sourceassoc_%s' % c_name
        user_param['name'] = 'fk_sourceassoc_user_id'

    actions = [migrate.ForeignKeyConstraint(**user_param),
               migrate.ForeignKeyConstraint(**param),
               sa.Index(ix_name, sourceassoc.c.source_id, col),
               migrate.UniqueConstraint(*uniq_cols, table=sourceassoc,
                                        name=uniq_name)]
    for action in actions:
        action.create() if post_action else action.drop()


def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    _handle_meter_indices(meta)
    meter = sa.Table('meter', meta, autoload=True)
    meter.rename('sample')

    _alter_sourceassoc(meta, 'meter', 'idx_sm')
    sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
    sourceassoc.c.meter_id.alter(name='sample_id')
    # re-bind metadata to pick up alter name change
    meta = sa.MetaData(bind=migrate_engine)
    _alter_sourceassoc(meta, 'sample', 'idx_ss', True)
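# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. The tail of upgrade() above shows a subtlety of sqlalchemy-migrate:
# after renaming a table or column, the old MetaData still describes the old
# names, so the script builds a fresh MetaData before touching the renamed
# objects again. In miniature (requires the sqlalchemy-migrate package;
# importing `migrate` monkey-patches .rename()/.alter() onto SQLAlchemy
# objects, and sqlite support here is a best-effort assumption):
import migrate  # noqa: importing applies the .rename()/.alter() patches
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
engine.execute('CREATE TABLE meter (meter_id INTEGER)')

meta = sa.MetaData(bind=engine)
meter = sa.Table('meter', meta, autoload=True)
meter.rename('sample')                      # table is now named "sample"

# Reflect again: the old `meta` still thinks the table is called "meter".
meta = sa.MetaData(bind=engine)
sample = sa.Table('sample', meta, autoload=True)
sample.c.meter_id.alter(name='sample_id')

meta = sa.MetaData(bind=engine)
print([c.name for c in sa.Table('sample', meta, autoload=True).columns])
# -> ['sample_id']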
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    metadata_float = Table('metadata_float', meta, autoload=True)
    metadata_float.c.value.alter(type=Float(53))
    trait = Table('trait', meta, autoload=True)
    trait.c.t_float.alter(type=Float(53))

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py

#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Index, MetaData, Table

INDEXES = {
    # `table_name`: ((`index_name`, `column`),)
    "user": (('ix_user_id', 'id'),),
    "source": (('ix_source_id', 'id'),),
    "project": (('ix_project_id', 'id'),),
    "meter": (('ix_meter_id', 'id'),),
    "alarm": (('ix_alarm_id', 'id'),),
    "resource": (('ix_resource_id', 'id'),)
}


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    load_tables = dict((table_name, Table(table_name, meta, autoload=True))
                       for table_name in INDEXES.keys())
    for table_name, indexes in INDEXES.items():
        table = load_tables[table_name]
        for index_name, column in indexes:
            index = Index(index_name, table.c[column])
            index.drop()
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py

#
# Copyright 2014 Intel Crop.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table

TABLES = ['user', 'project', 'alarm']

INDEXES = {
    "alarm": (('user_id', 'user', 'id'),
              ('project_id', 'project', 'id')),
}


def upgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        return
    meta = MetaData(bind=migrate_engine)
    load_tables = dict((table_name, Table(table_name, meta, autoload=True))
                       for table_name in TABLES)
    for table_name, indexes in INDEXES.items():
        table = load_tables[table_name]
        for column, ref_table_name, ref_column_name in indexes:
            ref_table = load_tables[ref_table_name]
            params = {'columns': [table.c[column]],
                      'refcolumns': [ref_table.c[ref_column_name]]}
            if migrate_engine.name == 'mysql':
                params['name'] = "_".join(('fk', table_name, column))
            fkey = ForeignKeyConstraint(**params)
            fkey.drop()

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py

#
# Copyright 2013 eNovance
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Table, Column, Text
from sqlalchemy import Boolean, Integer, String, DateTime, Float


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    alarm = Table(
        'alarm', meta,
        Column('id', String(255), primary_key=True, index=True),
        Column('enabled', Boolean),
        Column('name', Text()),
        Column('description', Text()),
        Column('timestamp', DateTime(timezone=False)),
        Column('counter_name', String(255), index=True),
        Column('user_id', String(255), index=True),
        Column('project_id', String(255), index=True),
        Column('comparison_operator', String(2)),
        Column('threshold', Float),
        Column('statistic', String(255)),
        Column('evaluation_periods', Integer),
        Column('period', Integer),
        Column('state', String(255)),
        Column('state_timestamp', DateTime(timezone=False)),
        Column('ok_actions', Text()),
        Column('alarm_actions', Text()),
        Column('insufficient_data_actions', Text()),
        Column('matching_metadata', Text()),
        mysql_engine='InnoDB',
        mysql_charset='utf8')
    alarm.create()
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate import ForeignKeyConstraint
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table

from ceilometer.storage.sqlalchemy import migration


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    event_type = Table(
        'event_type', meta,
        Column('id', Integer, primary_key=True),
        Column('desc', String(255), unique=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    event_type.create()
    event = Table('event', meta, autoload=True)
    unique_name = Table('unique_name', meta, autoload=True)

    # Event type is a specialization of Unique name, so
    # we insert into the event_type table all the distinct
    # unique names from the event.unique_name field along
    # with the key from the unique_name table, and
    # then rename the event.unique_name field to event.event_type
    conn = migrate_engine.connect()
    sql = ("INSERT INTO event_type "
           "SELECT unique_name.id, unique_name.key FROM event "
           "INNER JOIN unique_name "
           "ON event.unique_name_id = unique_name.id "
           "GROUP BY unique_name.id")
    conn.execute(sql)
    conn.close()

    # Now we need to drop the foreign key constraint, rename
    # the event.unique_name column, and re-add a new foreign
    # key constraint
    params = {'columns': [event.c.unique_name_id],
              'refcolumns': [unique_name.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = "event_ibfk_1"
    fkey = ForeignKeyConstraint(**params)
    fkey.drop()

    Column('event_type_id', Integer).create(event)

    # Move data from unique_name_id column into event_type_id column
    # and delete the entry from the unique_name table
    query = select([event.c.id, event.c.unique_name_id])
    for key, value in migration.paged(query):
        (event.update().where(event.c.id == key).
         values({"event_type_id": value}).execute())
        unique_name.delete().where(unique_name.c.id == key).execute()

    params = {'columns': [event.c.event_type_id],
              'refcolumns': [event_type.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = "_".join(('fk', 'event_type', 'id'))
    fkey = ForeignKeyConstraint(**params)
    fkey.create()

    event.c.unique_name_id.drop()
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py

#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import six
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text

from ceilometer import utils

tables = [('metadata_text', Text, True),
          ('metadata_bool', Boolean, False),
          ('metadata_int', Integer, False),
          ('metadata_float', Float, False)]


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    meter = Table('meter', meta, autoload=True)
    meta_tables = {}
    for t_name, t_type, t_nullable in tables:
        meta_tables[t_name] = Table(
            t_name, meta,
            Column('id', Integer, ForeignKey('meter.id'), primary_key=True),
            Column('meta_key', String(255), index=True, primary_key=True),
            Column('value', t_type, nullable=t_nullable),
            mysql_engine='InnoDB',
            mysql_charset='utf8',
        )
        meta_tables[t_name].create()

    for row in select([meter]).execute():
        if row['resource_metadata']:
            meter_id = row['id']
            rmeta = json.loads(row['resource_metadata'])
            for key, v in utils.dict_to_keyval(rmeta):
                ins = None
                if isinstance(v, six.string_types) or v is None:
                    ins = meta_tables['metadata_text'].insert()
                elif isinstance(v, bool):
                    ins = meta_tables['metadata_bool'].insert()
                elif isinstance(v, six.integer_types):
                    ins = meta_tables['metadata_int'].insert()
                elif isinstance(v, float):
                    ins = meta_tables['metadata_float'].insert()
                if ins is not None:
                    ins.values(id=meter_id, meta_key=key, value=v).execute()
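# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. Migration 020 fans nested resource_metadata out into typed
# key/value tables via utils.dict_to_keyval(), which flattens a nested dict
# into dotted key paths. A stand-in with the behaviour the migration relies
# on; the real helper lives in ceilometer/utils.py and may differ in detail:
def dict_to_keyval(value, key_base=None):
    """Yield ('a.b.c', leaf_value) pairs for a nested dict."""
    for key, item in value.items():
        key_path = key if key_base is None else '%s.%s' % (key_base, key)
        if isinstance(item, dict):
            for pair in dict_to_keyval(item, key_path):
                yield pair
        else:
            yield key_path, item

meta = {'disk': {'ephemeral': 0, 'root': 20}, 'state': 'active'}
print(sorted(dict_to_keyval(meta)))
# [('disk.ephemeral', 0), ('disk.root', 20), ('state', 'active')]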
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate import ForeignKeyConstraint
import sqlalchemy as sa


class ForeignKeyHandle(object):
    def __init__(self, meta):
        sample = sa.Table('sample', meta, autoload=True)
        meter = sa.Table('meter', meta, autoload=True)
        self.sample_params = {'columns': [sample.c.meter_id],
                              'refcolumns': [meter.c.id]}
        if meta.bind.engine.name == 'mysql':
            self.sample_params['name'] = "fk_sample_meter_id"

    def __enter__(self):
        ForeignKeyConstraint(**self.sample_params).drop()

    def __exit__(self, type, value, traceback):
        ForeignKeyConstraint(**self.sample_params).create()


def upgrade(migrate_engine):
    if migrate_engine.name == 'sqlite':
        return
    meta = sa.MetaData(bind=migrate_engine)
    sample = sa.Table('sample', meta, autoload=True)
    with ForeignKeyHandle(meta):
        # remove stray indexes implicitly created by InnoDB
        for index in sample.indexes:
            if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']:
                index.drop()
        sa.Index('ix_sample_meter_id', sample.c.meter_id).create()

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py

#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import migrate
import sqlalchemy as sa


def handle_rid_index(meta):
    if meta.bind.engine.name == 'sqlite':
        return

    resource = sa.Table('resource', meta, autoload=True)
    sample = sa.Table('sample', meta, autoload=True)
    params = {'columns': [sample.c.resource_id],
              'refcolumns': [resource.c.id],
              'name': 'fk_sample_resource_id'}
    if meta.bind.engine.name == 'mysql':
        # For mysql dialect all dependent FK should be removed
        # before index create/delete
        migrate.ForeignKeyConstraint(**params).drop()
    index = sa.Index('idx_sample_rid_cname', sample.c.resource_id,
                     sample.c.counter_name)
    index.drop()
    if meta.bind.engine.name == 'mysql':
        migrate.ForeignKeyConstraint(**params).create()


def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    meter = sa.Table(
        'meter', meta,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('type', sa.String(255)),
        sa.Column('unit', sa.String(255)),
        sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    meter.create()
    sample = sa.Table('sample', meta, autoload=True)
    query = sa.select([sample.c.counter_name, sample.c.counter_type,
                       sample.c.counter_unit]).distinct()
    for row in query.execute():
        meter.insert().values(name=row['counter_name'],
                              type=row['counter_type'],
                              unit=row['counter_unit']).execute()

    meter_id = sa.Column('meter_id', sa.Integer)
    meter_id.create(sample)
    params = {'columns': [sample.c.meter_id],
              'refcolumns': [meter.c.id]}
    if migrate_engine.name == 'mysql':
        params['name'] = 'fk_sample_meter_id'
    if migrate_engine.name != 'sqlite':
        migrate.ForeignKeyConstraint(**params).create()

    index = sa.Index('ix_meter_name', meter.c.name)
    index.create(bind=migrate_engine)

    for row in sa.select([meter]).execute():
        (sample.update().
         where(sa.and_(sample.c.counter_name == row['name'],
                       sample.c.counter_type == row['type'],
                       sample.c.counter_unit == row['unit'])).
         values({sample.c.meter_id: row['id']}).execute())

    handle_rid_index(meta)

    sample.c.counter_name.drop()
    sample.c.counter_type.drop()
    sample.c.counter_unit.drop()
    sample.c.counter_volume.alter(name='volume')
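# NOTE(editor): illustrative sketch, not part of the original ceilometer
# source. ForeignKeyHandle in migration 037 above is a context manager that
# drops a constraint on entry and recreates it on exit, so the index surgery
# in the middle cannot leave the FK permanently missing when the body
# completes. The shape of the idiom, with the schema work replaced by
# prints:
class TemporarilyDropped(object):
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        print('drop %s' % self.name)

    def __exit__(self, exc_type, exc_value, traceback):
        # runs even if the body raised
        print('recreate %s' % self.name)

with TemporarilyDropped('fk_sample_meter_id'):
    print('rebuild indexes here')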
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql

CREATE TABLE event_type (
  id INTEGER PRIMARY KEY ASC,
  desc STRING NOT NULL
);

INSERT INTO event_type
SELECT un.id, un.key
FROM unique_name un
JOIN event e ON un.id = e.unique_name_id
GROUP BY un.id;

ALTER TABLE event RENAME TO event_orig;

CREATE TABLE event (
  id INTEGER PRIMARY KEY ASC,
  generated FLOAT NOT NULL,
  message_id VARCHAR(50) UNIQUE,
  event_type_id INTEGER NOT NULL,
  FOREIGN KEY (event_type_id) REFERENCES event_type (id)
);

INSERT INTO event
SELECT id, generated, message_id, unique_name_id
FROM event_orig;

DROP TABLE event_orig;

DELETE FROM unique_name WHERE id IN (SELECT id FROM event_type);

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py

#
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import String, Text


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table('alarm', meta, autoload=True)

    type = Column('type', String(50), default='threshold')
    type.create(table, populate_default=True)

    rule = Column('rule', Text())
    rule.create(table)

    for row in table.select().execute().fetchall():
        query = []
        if row.matching_metadata is not None:
            matching_metadata = json.loads(row.matching_metadata)
            for key in matching_metadata:
                query.append({'field': key,
                              'op': 'eq',
                              'value': matching_metadata[key]})
        rule = {
            'meter_name': row.meter_name,
            'comparison_operator': row.comparison_operator,
            'threshold': row.threshold,
            'statistic': row.statistic,
            'evaluation_periods': row.evaluation_periods,
            'period': row.period,
            'query': query
        }
        table.update().where(table.c.id == row.id).values(rule=rule).execute()

    index = Index('ix_alarm_counter_name', table.c.meter_name)
    index.drop(bind=migrate_engine)
    table.c.meter_name.drop()
    table.c.comparison_operator.drop()
    table.c.threshold.drop()
    table.c.statistic.drop()
    table.c.evaluation_periods.drop()
    table.c.period.drop()
    table.c.matching_metadata.drop()
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def upgrade(migrate_engine):
    # NOTE(gordc): this is a noop script to handle bug1468916
    # previous lowering of id length will fail if db contains data longer.
    # this skips migration for those failing. the next script will resize
    # if this original migration passed.
    pass

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py

# -*- coding: utf-8 -*-
#
# Copyright 2013 eNovance SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Float
from sqlalchemy import MetaData
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    meter = Table('meter', meta, autoload=True)
    meter.c.counter_volume.alter(type=Float(53))

ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py

# Copyright 2012 Canonical.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def upgrade(migrate_engine):
    if migrate_engine.name == "mysql":
        tables = ['meter', 'user', 'resource', 'project', 'source',
                  'sourceassoc']
        migrate_engine.execute("SET foreign_key_checks = 0")
        for table in tables:
            migrate_engine.execute(
                "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table)
        migrate_engine.execute("SET foreign_key_checks = 1")
        migrate_engine.execute(
            "ALTER DATABASE %s DEFAULT CHARACTER SET utf8"
            % migrate_engine.url.database)
See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa TABLES_012 = ['resource', 'sourceassoc', 'user', 'project', 'meter', 'source', 'alarm'] TABLES_027 = ['user', 'project', 'alarm'] def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) for table_name in TABLES_027: try: (sa.Table('dump027_' + table_name, meta, autoload=True). drop(checkfirst=True)) except sa.exc.NoSuchTableError: pass for table_name in TABLES_012: try: (sa.Table('dump_' + table_name, meta, autoload=True). drop(checkfirst=True)) except sa.exc.NoSuchTableError: pass ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py0000664000567000056710000000444612701406223035257 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table from sqlalchemy.sql.expression import select TABLES = ['resource', 'sourceassoc', 'user', 'project', 'meter', 'source', 'alarm'] INDEXES = { "resource": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "sourceassoc": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('resource_id', 'resource', 'id'), ('meter_id', 'meter', 'id'), ('source_id', 'source', 'id')), "alarm": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "meter": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('resource_id', 'resource', 'id'),) } def upgrade(migrate_engine): if migrate_engine.name == 'sqlite': return meta = MetaData(bind=migrate_engine) load_tables = dict((table_name, Table(table_name, meta, autoload=True)) for table_name in TABLES) for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] subq = select([getattr(ref_table.c, ref_column_name)]) sql_del = table.delete().where( ~ getattr(table.c, column).in_(subq)) migrate_engine.execute(sql_del) params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', table_name, column)) fkey = ForeignKeyConstraint(**params) fkey.create() ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py0000664000567000056710000000161212701406223034704 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import BigInteger from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('metadata_int', meta, autoload=True) resource.c.value.alter(type=BigInteger) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision0000664000567000056710000000423612701406223035377 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_0000664000567000056710000000160612701406223035341 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) resource.c.resource_metadata.alter(type=Text) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_tex0000664000567000056710000000160012701406223035351 0ustar jenkinsjenkins00000000000000 # Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alm_hist = Table('alarm_history', meta, autoload=True) alm_hist.c.detail.alter(type=Text) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py0000664000567000056710000000154712701406223033432 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) meter = Table('meter', meta, autoload=True) duration = Column('counter_duration', Integer) meter.drop_column(duration) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py0000664000567000056710000000151612701406223034040 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy as sa def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) meter = sa.Table('meter', meta, autoload=True) index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, meter.c.counter_name) index.create(bind=migrate_engine) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py0000664000567000056710000000137312701406223033207 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) raw = sa.Column('raw', sa.Text) event.create_column(raw) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py0000664000567000056710000000647012701406223035320 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
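# NOTE: a rough outline of this migration (a readability aid, not part of
# the original module): drop the foreign keys that reference the user and
# project tables, drop the idx_su/idx_sp indexes on sourceassoc, replace
# its (sample_id, user_id) unique constraint with one on sample_id alone,
# drop its user_id/project_id columns, and finally drop the 'user' and
# 'project' tables themselves.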
from migrate import ForeignKeyConstraint, UniqueConstraint import sqlalchemy as sa TABLES_DROP = ['user', 'project'] TABLES = ['user', 'project', 'sourceassoc', 'sample', 'resource', 'alarm_history'] INDEXES = { "sample": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "sourceassoc": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "resource": (('user_id', 'user', 'id'), ('project_id', 'project', 'id')), "alarm_history": (('user_id', 'user', 'id'), ('project_id', 'project', 'id'), ('on_behalf_of', 'project', 'id')), } def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) load_tables = dict((table_name, sa.Table(table_name, meta, autoload=True)) for table_name in TABLES) if migrate_engine.name != 'sqlite': for table_name, indexes in INDEXES.items(): table = load_tables[table_name] for column, ref_table_name, ref_column_name in indexes: ref_table = load_tables[ref_table_name] params = {'columns': [table.c[column]], 'refcolumns': [ref_table.c[ref_column_name]]} if (migrate_engine.name == "mysql" and table_name != 'alarm_history'): params['name'] = "_".join(('fk', table_name, column)) elif (migrate_engine.name == "postgresql" and table_name == "sample"): # The fk contains the old table name params['name'] = "_".join(('meter', column, 'fkey')) fkey = ForeignKeyConstraint(**params) fkey.drop() sourceassoc = load_tables['sourceassoc'] if migrate_engine.name != 'sqlite': idx = sa.Index('idx_su', sourceassoc.c.source_id, sourceassoc.c.user_id) idx.drop(bind=migrate_engine) idx = sa.Index('idx_sp', sourceassoc.c.source_id, sourceassoc.c.project_id) idx.drop(bind=migrate_engine) params = {} if migrate_engine.name == "mysql": params = {'name': 'uniq_sourceassoc0sample_id'} uc = UniqueConstraint('sample_id', table=sourceassoc, **params) uc.create() params = {} if migrate_engine.name == "mysql": params = {'name': 'uniq_sourceassoc0sample_id0user_id'} uc = UniqueConstraint('sample_id', 'user_id', table=sourceassoc, **params) uc.drop() sourceassoc.c.user_id.drop() sourceassoc.c.project_id.drop() for table_name in TABLES_DROP: sa.Table(table_name, meta, autoload=True).drop() ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py0000664000567000056710000000165112701406223034051 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import sqlalchemy from ceilometer.storage.sqlalchemy import models def upgrade(migrate_engine): meta = sqlalchemy.MetaData(bind=migrate_engine) meter = sqlalchemy.Table('meter', meta, autoload=True) c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(), default=timeutils.utcnow) meter.create_column(c) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py0000664000567000056710000000606212701406223033407 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import UniqueConstraint from ceilometer.storage.sqlalchemy import migration def upgrade(migrate_engine): meta = MetaData(migrate_engine) trait_type = Table( 'trait_type', meta, Column('id', Integer, primary_key=True), Column('desc', String(255)), Column('data_type', Integer), UniqueConstraint('desc', 'data_type', name="tt_unique"), mysql_engine='InnoDB', mysql_charset='utf8', ) trait = Table('trait', meta, autoload=True) unique_name = Table('unique_name', meta, autoload=True) trait_type.create(migrate_engine) # Trait type extracts data from Trait and Unique name. # We take all trait names from Unique Name, and data types # from Trait. We then remove dtype and name from trait, and # remove the name field. conn = migrate_engine.connect() sql = ("INSERT INTO trait_type " "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait " "INNER JOIN unique_name " "ON trait.name_id = unique_name.id " "GROUP BY unique_name.id, unique_name.key, trait.t_type") conn.execute(sql) conn.close() # Now we need to drop the foreign key constraint, rename # the trait.name column, and re-add a new foreign # key constraint params = {'columns': [trait.c.name_id], 'refcolumns': [unique_name.c.id]} if migrate_engine.name == 'mysql': params['name'] = "trait_ibfk_1" # foreign key to the unique name table fkey = ForeignKeyConstraint(**params) fkey.drop() Column('trait_type_id', Integer).create(trait) # Move data from name_id column into trait_type_id column query = select([trait.c.id, trait.c.name_id]) for key, value in migration.paged(query): (trait.update().where(trait.c.id == key). values({"trait_type_id": value}).execute()) trait.c.name_id.drop() params = {'columns': [trait.c.trait_type_id], 'refcolumns': [trait_type.c.id]} if migrate_engine.name == 'mysql': params['name'] = "_".join(('fk', 'trait_type', 'id')) fkey = ForeignKeyConstraint(**params) fkey.create() # Drop the t_type column to data_type. trait.c.t_type.drop() # Finally, drop the unique_name table - we don't need it # anymore. unique_name.drop() ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.0000664000567000056710000000156512701406223035233 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import Text def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) alarm = Table('alarm', meta, autoload=True) time_constraints = Column('time_constraints', Text()) alarm.create_column(time_constraints) ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_dateti0000664000567000056710000000367412701406223035372 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models _col = 'timestamp' def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) meter = sa.Table('meter', meta, autoload=True) _convert_data_type(meter, _col, sa.DateTime(), models.PreciseTimestamp(), pk_attr='id', index=True) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_ala0000664000567000056710000000146612701406223035324 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
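# NOTE: the alter() call below renames the column in place; on MySQL it
# is roughly equivalent to the following (an illustrative sketch only,
# assuming the column keeps its original VARCHAR(255) type):
#
#     ALTER TABLE alarm CHANGE counter_name meter_name VARCHAR(255);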
from sqlalchemy import MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine alarm = Table('alarm', meta, autoload=True) alarm.c.counter_name.alter(name='meter_name') ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py0000664000567000056710000000406412701406223032347 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column from sqlalchemy import Float from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) unique_name = Table( 'unique_name', meta, Column('id', Integer, primary_key=True), Column('key', String(32), index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) unique_name.create() event = Table( 'event', meta, Column('id', Integer, primary_key=True), Column('generated', Float(asdecimal=True), index=True), Column('unique_name_id', Integer, ForeignKey('unique_name.id')), mysql_engine='InnoDB', mysql_charset='utf8', ) event.create() trait = Table( 'trait', meta, Column('id', Integer, primary_key=True), Column('name_id', Integer, ForeignKey('unique_name.id')), Column('t_type', Integer, index=True), Column('t_string', String(32), nullable=True, default=None, index=True), Column('t_float', Float, nullable=True, default=None, index=True), Column('t_int', Integer, nullable=True, default=None, index=True), Column('t_datetime', Float(asdecimal=True), nullable=True, default=None, index=True), Column('event_id', Integer, ForeignKey('event.id')), mysql_engine='InnoDB', mysql_charset='utf8', ) trait.create() ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py0000664000567000056710000000327112701406223034340 0ustar jenkinsjenkins00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset.constraint import UniqueConstraint import sqlalchemy def upgrade(migrate_engine): meta = sqlalchemy.MetaData(bind=migrate_engine) event = sqlalchemy.Table('event', meta, autoload=True) message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50)) event.create_column(message_id) cons = UniqueConstraint('message_id', table=event) cons.create() index = sqlalchemy.Index('idx_event_message_id', event.c.message_id) index.create(bind=migrate_engine) # Populate the new column ... 
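    # The backfill below copies each event's 'message_id' trait into the
    # new column; in SQL terms it is roughly (illustrative only):
    #
    #     UPDATE event SET message_id =
    #         (SELECT t.t_string FROM trait t
    #            JOIN unique_name un ON un.id = t.name_id
    #           WHERE un.key = 'message_id' AND t.event_id = event.id);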
trait = sqlalchemy.Table('trait', meta, autoload=True) unique_name = sqlalchemy.Table('unique_name', meta, autoload=True) join = trait.join(unique_name, unique_name.c.id == trait.c.name_id) traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string], whereclause=(unique_name.c.key == 'message_id'), from_obj=join) for event_id, value in traits.execute(): (event.update().where(event.c.id == event_id).values(message_id=value). execute()) # Leave the Trait, makes the rollback easier and won't really hurt anyone. ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.p0000664000567000056710000000166512701406223035332 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Column, DateTime def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) resource = Table('resource', meta, autoload=True) timestamp = Column('timestamp', DateTime) resource.drop_column(timestamp) received_timestamp = Column('received_timestamp', DateTime) resource.drop_column(received_timestamp) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgs0000664000567000056710000000426512701406223035404 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE (gordc): this is a copy of 024 migration script which missed pgsql import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. 
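    # (extend_existing=True in the re-declaration below lets it merge into
    # the Table object already registered in the metadata rather than
    # raising an "already defined" error.)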
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). execute()) orig_col.drop() new_col.alter(name=col) if index: sa.Index('ix_%s_%s' % (table.name, col), new_col).create() def upgrade(migrate_engine): if migrate_engine.name == 'postgresql': meta = sa.MetaData(bind=migrate_engine) event = sa.Table('event', meta, autoload=True) _convert_data_type(event, 'generated', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) trait = sa.Table('trait', meta, autoload=True) _convert_data_type(trait, 't_datetime', sa.Float(), models.PreciseTimestamp(), pk_attr='id', index=True) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py0000664000567000056710000000162112701406223033122 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import VARCHAR def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) name = Table('unique_name', meta, autoload=True) name.c.key.alter(type=VARCHAR(length=255)) trait = Table('trait', meta, autoload=True) trait.c.t_string.alter(type=VARCHAR(length=255)) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py0000664000567000056710000000137712701406223033334 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData from sqlalchemy import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) users = Table('alarm', meta, autoload=True) users.c.id.alter(name='alarm_id') ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py0000664000567000056710000001257212701406223033564 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import hashlib import migrate from oslo_serialization import jsonutils import sqlalchemy as sa m_tables = [('metadata_text', sa.Text, True), ('metadata_bool', sa.Boolean, False), ('metadata_int', sa.BigInteger, False), ('metadata_float', sa.Float(53), False)] def _migrate_meta_tables(meta, col, new_col, new_fk): for t_name, t_type, t_nullable in m_tables: m_table = sa.Table(t_name, meta, autoload=True) m_table_new = sa.Table( '%s_new' % t_name, meta, sa.Column('id', sa.Integer, sa.ForeignKey(new_fk), primary_key=True), sa.Column('meta_key', sa.String(255), primary_key=True), sa.Column('value', t_type, nullable=t_nullable), mysql_engine='InnoDB', mysql_charset='utf8', ) m_table_new.create() if m_table.select().scalar() is not None: m_table_new.insert().from_select( ['id', 'meta_key', 'value'], sa.select([new_col, m_table.c.meta_key, m_table.c.value]).where( col == m_table.c.id).group_by( new_col, m_table.c.meta_key, m_table.c.value)).execute() m_table.drop() if meta.bind.engine.name != 'sqlite': sa.Index('ix_%s_meta_key' % t_name, m_table_new.c.meta_key).create() m_table_new.rename(t_name) def upgrade(migrate_engine): meta = sa.MetaData(bind=migrate_engine) resource = sa.Table( 'resource', meta, sa.Column('internal_id', sa.Integer, primary_key=True), sa.Column('resource_id', sa.String(255)), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('source_id', sa.String(255)), sa.Column('resource_metadata', sa.Text), sa.Column('metadata_hash', sa.String(32)), mysql_engine='InnoDB', mysql_charset='utf8') resource.create() # copy resource data in to resource table sample = sa.Table('sample', meta, autoload=True) sa.Column('metadata_hash', sa.String(32)).create(sample) for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute(): sample.update().where(sample.c.id == row['id']).values( {sample.c.metadata_hash: hashlib.md5(jsonutils.dumps( row['resource_metadata'], sort_keys=True)).hexdigest()}).execute() query = sa.select([sample.c.resource_id, sample.c.user_id, sample.c.project_id, sample.c.source_id, sample.c.resource_metadata, sample.c.metadata_hash]).distinct() for row in query.execute(): resource.insert().values( resource_id=row['resource_id'], user_id=row['user_id'], project_id=row['project_id'], source_id=row['source_id'], resource_metadata=row['resource_metadata'], metadata_hash=row['metadata_hash']).execute() # link sample records to new resource records sa.Column('resource_id_new', sa.Integer).create(sample) for row in sa.select([resource]).execute(): (sample.update(). where(sa.and_( sample.c.resource_id == row['resource_id'], sample.c.user_id == row['user_id'], sample.c.project_id == row['project_id'], sample.c.source_id == row['source_id'], sample.c.metadata_hash == row['metadata_hash'])). 
values({sample.c.resource_id_new: row['internal_id']}).execute()) sample.c.resource_id.drop() sample.c.metadata_hash.drop() sample.c.resource_id_new.alter(name='resource_id') # re-bind metadata to pick up alter name change meta = sa.MetaData(bind=migrate_engine) sample = sa.Table('sample', meta, autoload=True) resource = sa.Table('resource', meta, autoload=True) if migrate_engine.name != 'sqlite': sa.Index('ix_resource_resource_id', resource.c.resource_id).create() sa.Index('ix_sample_user_id', sample.c.user_id).drop() sa.Index('ix_sample_project_id', sample.c.project_id).drop() sa.Index('ix_sample_resource_id', sample.c.resource_id).create() sa.Index('ix_sample_meter_id_resource_id', sample.c.meter_id, sample.c.resource_id).create() params = {'columns': [sample.c.resource_id], 'refcolumns': [resource.c.internal_id]} if migrate_engine.name == 'mysql': params['name'] = 'fk_sample_resource_internal_id' migrate.ForeignKeyConstraint(**params).create() sample.c.user_id.drop() sample.c.project_id.drop() sample.c.source_id.drop() sample.c.resource_metadata.drop() _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id, 'resource.internal_id') ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.pyceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision0000664000567000056710000000406212701406223035350 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy import models def _convert_data_type(table, col, from_t, to_t, pk_attr='id'): temp_col_n = 'convert_data_type_temp_col' # Override column we're going to convert with from_t, since the type we're # replacing could be custom and we need to tell SQLALchemy how to perform # CRUD operations with it. table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), extend_existing=True) sa.Column(temp_col_n, to_t).create(table) key_attr = getattr(table.c, pk_attr) orig_col = getattr(table.c, col) new_col = getattr(table.c, temp_col_n) query = sa.select([key_attr, orig_col]) for key, value in migration.paged(query): (table.update().where(key_attr == key).values({temp_col_n: value}). 
execute()) orig_col.drop() new_col.alter(name=col) to_convert = [ ('alarm', 'timestamp', 'id'), ('alarm', 'state_timestamp', 'id'), ('alarm_history', 'timestamp', 'alarm_id'), ] def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sa.MetaData(bind=migrate_engine) for table_name, col_name, pk_attr in to_convert: table = sa.Table(table_name, meta, autoload=True) _convert_data_type(table, col_name, sa.DateTime(), models.PreciseTimestamp(), pk_attr=pk_attr) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py0000664000567000056710000000465712701406223035075 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint from sqlalchemy import MetaData, Table, Column, Index from sqlalchemy import String, DateTime def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine project = Table('project', meta, autoload=True) user = Table('user', meta, autoload=True) alarm_history = Table( 'alarm_history', meta, Column('event_id', String(255), primary_key=True, index=True), Column('alarm_id', String(255)), Column('on_behalf_of', String(255)), Column('project_id', String(255)), Column('user_id', String(255)), Column('type', String(20)), Column('detail', String(255)), Column('timestamp', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8') alarm_history.create() if migrate_engine.name in ['mysql', 'postgresql']: indices = [Index('ix_alarm_history_alarm_id', alarm_history.c.alarm_id), Index('ix_alarm_history_on_behalf_of', alarm_history.c.on_behalf_of), Index('ix_alarm_history_project_id', alarm_history.c.project_id), Index('ix_alarm_history_on_user_id', alarm_history.c.user_id)] for index in indices: index.create(migrate_engine) fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of], refcolumns=[project.c.id]), ForeignKeyConstraint(columns=[alarm_history.c.project_id], refcolumns=[project.c.id]), ForeignKeyConstraint(columns=[alarm_history.c.user_id], refcolumns=[user.c.id])] for fkey in fkeys: fkey.create(engine=migrate_engine) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/manage.py0000664000567000056710000000016412701406223027221 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python from migrate.versioning.shell import main if __name__ == '__main__': main(debug='False') ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py0000664000567000056710000000000012701406223027515 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056710000000231612701406223027531 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=ceilometer # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. 
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version

# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]

# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False
ceilometer-6.0.0/ceilometer/storage/sqlalchemy/utils.py0000664000567000056710000001133712701406223024460 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import operator

import six
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import desc
from sqlalchemy import not_
from sqlalchemy import or_
from sqlalchemy.orm import aliased

import ceilometer
from ceilometer.storage.sqlalchemy import models

META_TYPE_MAP = {bool: models.MetaBool,
                 str: models.MetaText,
                 six.text_type: models.MetaText,
                 type(None): models.MetaText,
                 int: models.MetaBigInt,
                 float: models.MetaFloat}

if six.PY2:
    META_TYPE_MAP[long] = models.MetaBigInt


class QueryTransformer(object):
    operators = {"=": operator.eq,
                 "<": operator.lt,
                 ">": operator.gt,
                 "<=": operator.le,
                 "=<": operator.le,
                 ">=": operator.ge,
                 "=>": operator.ge,
                 "!=": operator.ne,
                 "in": lambda field_name, values: field_name.in_(values),
                 "=~": lambda field, value: field.op("regexp")(value)}

    # operators which differ between dialects
    dialect_operators = {'postgresql': {'=~': (lambda field, value:
                                               field.op("~")(value))}}

    complex_operators = {"or": or_,
                         "and": and_,
                         "not": not_}

    ordering_functions = {"asc": asc,
                          "desc": desc}

    def __init__(self, table, query, dialect='mysql'):
        self.table = table
        self.query = query
        self.dialect_name = dialect

    def _get_operator(self, op):
        return (self.dialect_operators.get(self.dialect_name, {}).get(op)
                or self.operators[op])

    def _handle_complex_op(self, complex_op, nodes):
        op = self.complex_operators[complex_op]
        if op == not_:
            nodes = [nodes]
        element_list = []
        for node in nodes:
            element = self._transform(node)
            element_list.append(element)
        return op(*element_list)

    def _handle_simple_op(self, simple_op, nodes):
        op = self._get_operator(simple_op)
        field_name, value = list(nodes.items())[0]
        if field_name.startswith('resource_metadata.'):
            return self._handle_metadata(op, field_name, value)
        else:
            return op(getattr(self.table, field_name), value)

    def _handle_metadata(self, op, field_name, value):
        if op == self.operators["in"]:
            raise
ceilometer.NotImplementedError('Metadata query with in ' 'operator is not implemented') field_name = field_name[len('resource_metadata.'):] meta_table = META_TYPE_MAP[type(value)] meta_alias = aliased(meta_table) on_clause = and_(self.table.internal_id == meta_alias.id, meta_alias.meta_key == field_name) # outer join is needed to support metaquery # with or operator on non existent metadata field # see: test_query_non_existing_metadata_with_result # test case. self.query = self.query.outerjoin(meta_alias, on_clause) return op(meta_alias.value, value) def _transform(self, sub_tree): operator, nodes = list(sub_tree.items())[0] if operator in self.complex_operators: return self._handle_complex_op(operator, nodes) else: return self._handle_simple_op(operator, nodes) def apply_filter(self, expression_tree): condition = self._transform(expression_tree) self.query = self.query.filter(condition) def apply_options(self, orderby, limit): self._apply_order_by(orderby) if limit is not None: self.query = self.query.limit(limit) def _apply_order_by(self, orderby): if orderby is not None: for field in orderby: attr, order = list(field.items())[0] ordering_function = self.ordering_functions[order] self.query = self.query.order_by(ordering_function( getattr(self.table, attr))) else: self.query = self.query.order_by(desc(self.table.timestamp)) def get_query(self): return self.query ceilometer-6.0.0/ceilometer/storage/sqlalchemy/__init__.py0000664000567000056710000000000012701406223025040 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/sqlalchemy/models.py0000664000567000056710000002452312701406223024604 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Ceilometer data. 
""" import hashlib import json from oslo_utils import timeutils import six from sqlalchemy import (Column, Integer, String, ForeignKey, Index, UniqueConstraint, BigInteger) from sqlalchemy import event from sqlalchemy import Float, Boolean, Text, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import deferred from sqlalchemy.orm import relationship from sqlalchemy.types import TypeDecorator from ceilometer import utils class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = String @staticmethod def process_bind_param(value, dialect): if value is not None: value = json.dumps(value) return value @staticmethod def process_result_value(value, dialect): if value is not None: value = json.loads(value) return value class PreciseTimestamp(TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = DateTime def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(DECIMAL(precision=20, scale=6, asdecimal=True)) return self.impl @staticmethod def process_bind_param(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.dt_to_decimal(value) return value @staticmethod def process_result_value(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.decimal_to_dt(value) return value class CeilometerBase(object): """Base class for Ceilometer Models.""" __table_args__ = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} __table_initialized__ = False def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def update(self, values): """Make the model object behave like a dict.""" for k, v in six.iteritems(values): setattr(self, k, v) Base = declarative_base(cls=CeilometerBase) class MetaText(Base): """Metering text metadata.""" __tablename__ = 'metadata_text' __table_args__ = ( Index('ix_meta_text_key', 'meta_key'), ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Text) class MetaBool(Base): """Metering boolean metadata.""" __tablename__ = 'metadata_bool' __table_args__ = ( Index('ix_meta_bool_key', 'meta_key'), ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Boolean) class MetaBigInt(Base): """Metering integer metadata.""" __tablename__ = 'metadata_int' __table_args__ = ( Index('ix_meta_int_key', 'meta_key'), ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(BigInteger, default=False) class MetaFloat(Base): """Metering float metadata.""" __tablename__ = 'metadata_float' __table_args__ = ( Index('ix_meta_float_key', 'meta_key'), ) id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) meta_key = Column(String(255), primary_key=True) value = Column(Float(53), default=False) class Meter(Base): """Meter definition data.""" __tablename__ = 'meter' __table_args__ = ( UniqueConstraint('name', 'type', 'unit', name='def_unique'), Index('ix_meter_name', 'name'), ) id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False) type = Column(String(255)) unit = Column(String(255)) samples = relationship("Sample", backref="meter") class Resource(Base): """Resource data.""" __tablename__ = 
'resource'
    __table_args__ = (
        # TODO(gordc): this should exist but the attribute values we set
        # for user/project/source/resource id's are too large
        # for a uuid.
        # UniqueConstraint('resource_id', 'user_id', 'project_id',
        #                  'source_id', 'metadata_hash',
        #                  name='res_def_unique'),
        Index('ix_resource_resource_id', 'resource_id'),
        Index('ix_resource_metadata_hash', 'metadata_hash'),
    )
    internal_id = Column(Integer, primary_key=True)
    user_id = Column(String(255))
    project_id = Column(String(255))
    source_id = Column(String(255))
    resource_id = Column(String(255), nullable=False)
    resource_metadata = deferred(Column(JSONEncodedDict()))
    metadata_hash = deferred(Column(String(32)))
    samples = relationship("Sample", backref="resource")
    meta_text = relationship("MetaText", backref="resource",
                             cascade="all, delete-orphan")
    meta_float = relationship("MetaFloat", backref="resource",
                              cascade="all, delete-orphan")
    meta_int = relationship("MetaBigInt", backref="resource",
                            cascade="all, delete-orphan")
    meta_bool = relationship("MetaBool", backref="resource",
                             cascade="all, delete-orphan")


@event.listens_for(Resource, "before_insert")
def before_insert(mapper, connection, target):
    metadata = json.dumps(target.resource_metadata, sort_keys=True)
    target.metadata_hash = hashlib.md5(metadata).hexdigest()


class Sample(Base):
    """Metering data."""

    __tablename__ = 'sample'
    __table_args__ = (
        Index('ix_sample_timestamp', 'timestamp'),
        Index('ix_sample_resource_id', 'resource_id'),
        Index('ix_sample_meter_id', 'meter_id'),
        Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id')
    )
    id = Column(Integer, primary_key=True)
    meter_id = Column(Integer, ForeignKey('meter.id'))
    resource_id = Column(Integer, ForeignKey('resource.internal_id'))
    volume = Column(Float(53))
    timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow())
    recorded_at = Column(PreciseTimestamp(),
                         default=lambda: timeutils.utcnow())
    message_signature = Column(String(64))
    message_id = Column(String(128))


class FullSample(object):
    """A fake model for query samples."""

    id = Sample.id
    timestamp = Sample.timestamp
    message_id = Sample.message_id
    message_signature = Sample.message_signature
    recorded_at = Sample.recorded_at
    counter_name = Meter.name
    counter_type = Meter.type
    counter_unit = Meter.unit
    counter_volume = Sample.volume
    resource_id = Resource.resource_id
    source_id = Resource.source_id
    user_id = Resource.user_id
    project_id = Resource.project_id
    resource_metadata = Resource.resource_metadata
    internal_id = Resource.internal_id


class EventType(Base):
    """Types of event records."""

    __tablename__ = 'event_type'
    id = Column(Integer, primary_key=True)
    desc = Column(String(255), unique=True)

    def __init__(self, event_type):
        self.desc = event_type

    def __repr__(self):
        return "<EventType: %s>" % self.desc


class Event(Base):
    __tablename__ = 'event'
    __table_args__ = (
        Index('ix_event_message_id', 'message_id'),
        Index('ix_event_type_id', 'event_type_id'),
        Index('ix_event_generated', 'generated')
    )
    id = Column(Integer, primary_key=True)
    message_id = Column(String(50), unique=True)
    generated = Column(PreciseTimestamp())
    raw = deferred(Column(JSONEncodedDict()))
    event_type_id = Column(Integer, ForeignKey('event_type.id'))
    event_type = relationship("EventType", backref='events')

    def __init__(self, message_id, event_type, generated, raw):
        self.message_id = message_id
        self.event_type = event_type
        self.generated = generated
        self.raw = raw

    def __repr__(self):
        return "<Event %d('Event : %s %s, generated %s')>" % (
            self.id, self.message_id, self.event_type, self.generated)


class TraitText(Base):
    """Event text
traits.""" __tablename__ = 'trait_text' __table_args__ = ( Index('ix_trait_text_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(String(255)) class TraitInt(Base): """Event integer traits.""" __tablename__ = 'trait_int' __table_args__ = ( Index('ix_trait_int_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(Integer) class TraitFloat(Base): """Event float traits.""" __tablename__ = 'trait_float' __table_args__ = ( Index('ix_trait_float_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(Float(53)) class TraitDatetime(Base): """Event datetime traits.""" __tablename__ = 'trait_datetime' __table_args__ = ( Index('ix_trait_datetime_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(PreciseTimestamp()) ceilometer-6.0.0/ceilometer/storage/sqlalchemy/migration.py0000664000567000056710000000172312701406223025307 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def paged(query, size=1000): """Page query results :param query: the SQLAlchemy query to execute :param size: the max page size return: generator with query data """ offset = 0 while True: page = query.offset(offset).limit(size).execute() if page.rowcount <= 0: # There are no more rows break for row in page: yield row offset += size ceilometer-6.0.0/ceilometer/storage/mongo/0000775000567000056710000000000012701406364021724 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/mongo/utils.py0000664000567000056710000005657212701406223023447 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common functions for MongoDB and DB2 backends """ import datetime import time import weakref from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import pymongo import pymongo.errors import six from six.moves.urllib import parse from ceilometer.i18n import _, _LI ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86 LOG = log.getLogger(__name__) EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4} OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'} MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result) FINALIZE_INT_LAMBDA = lambda result, param=None: int(result) CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id', 'user_id', 'project_id', 'source']) def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): """Create the query document to find timestamps within that range. This is done by given two possible datetimes and their operations. By default, using $gte for the lower bound and $lt for the upper bound. """ ts_range = {} if start: if start_timestamp_op == 'gt': start_timestamp_op = '$gt' else: start_timestamp_op = '$gte' ts_range[start_timestamp_op] = start if end: if end_timestamp_op == 'le': end_timestamp_op = '$lte' else: end_timestamp_op = '$lt' ts_range[end_timestamp_op] = end return ts_range def make_events_query_from_filter(event_filter): """Return start and stop row for filtering and a query. Query is based on the selected parameter. :param event_filter: storage.EventFilter object. """ query = {} q_list = [] ts_range = make_timestamp_range(event_filter.start_timestamp, event_filter.end_timestamp) if ts_range: q_list.append({'timestamp': ts_range}) if event_filter.event_type: q_list.append({'event_type': event_filter.event_type}) if event_filter.message_id: q_list.append({'_id': event_filter.message_id}) if event_filter.traits_filter: for trait_filter in event_filter.traits_filter: op = trait_filter.pop('op', 'eq') dict_query = {} for k, v in six.iteritems(trait_filter): if v is not None: # All parameters in EventFilter['traits'] are optional, so # we need to check if they are in the query or no. if k == 'key': dict_query.setdefault('trait_name', v) elif k in ['string', 'integer', 'datetime', 'float']: dict_query.setdefault('trait_type', EVENT_TRAIT_TYPES[k]) dict_query.setdefault('trait_value', v if op == 'eq' else {OP_SIGN[op]: v}) dict_query = {'$elemMatch': dict_query} q_list.append({'traits': dict_query}) if event_filter.admin_proj: q_list.append({'$or': [ {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}}, {'traits': { '$elemMatch': {'trait_name': 'project_id', 'trait_value': event_filter.admin_proj}}}]}) if q_list: query = {'$and': q_list} return query def make_query_from_filter(sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param sample_filter: SampleFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. 
""" q = {} if sample_filter.user: q['user_id'] = sample_filter.user if sample_filter.project: q['project_id'] = sample_filter.project if sample_filter.meter: q['counter_name'] = sample_filter.meter elif require_meter: raise RuntimeError('Missing required meter specifier') ts_range = make_timestamp_range(sample_filter.start_timestamp, sample_filter.end_timestamp, sample_filter.start_timestamp_op, sample_filter.end_timestamp_op) if ts_range: q['timestamp'] = ts_range if sample_filter.resource: q['resource_id'] = sample_filter.resource if sample_filter.source: q['source'] = sample_filter.source if sample_filter.message_id: q['message_id'] = sample_filter.message_id # so the samples call metadata resource_metadata, so we convert # to that. q.update(dict( ('resource_%s' % k, v) for (k, v) in six.iteritems( improve_keys(sample_filter.metaquery, metaquery=True)))) return q def quote_key(key, reverse=False): """Prepare key for storage data in MongoDB. :param key: key that should be quoted :param reverse: boolean, True --- if we need a reverse order of the keys parts :return: iter of quoted part of the key """ r = -1 if reverse else 1 for k in key.split('.')[::r]: if k.startswith('$'): k = parse.quote(k) yield k def improve_keys(data, metaquery=False): """Improves keys in dict if they contained '.' or started with '$'. :param data: is a dictionary where keys need to be checked and improved :param metaquery: boolean, if True dots are not escaped from the keys :return: improved dictionary if keys contained dots or started with '$': {'a.b': 'v'} -> {'a': {'b': 'v'}} {'$ab': 'v'} -> {'%24ab': 'v'} """ if not isinstance(data, dict): return data if metaquery: for key in six.iterkeys(data): if '.$' in key: key_list = [] for k in quote_key(key): key_list.append(k) new_key = '.'.join(key_list) data[new_key] = data.pop(key) else: for key, value in data.items(): if isinstance(value, dict): improve_keys(value) if '.' in key: new_dict = {} for k in quote_key(key, reverse=True): new = {} new[k] = new_dict if new_dict else data.pop(key) new_dict = new data.update(new_dict) else: if key.startswith('$'): new_key = parse.quote(key) data[new_key] = data.pop(key) return data def unquote_keys(data): """Restores initial view of 'quoted' keys in dictionary data :param data: is a dictionary :return: data with restored keys if they were 'quoted'. 
""" if isinstance(data, dict): for key, value in data.items(): if isinstance(value, dict): unquote_keys(value) if key.startswith('%24'): k = parse.unquote(key) data[k] = data.pop(key) return data class ConnectionPool(object): def __init__(self): self._pool = {} def connect(self, url): connection_options = pymongo.uri_parser.parse_uri(url) del connection_options['database'] del connection_options['username'] del connection_options['password'] del connection_options['collection'] pool_key = tuple(connection_options) if pool_key in self._pool: client = self._pool.get(pool_key)() if client: return client splitted_url = netutils.urlsplit(url) log_data = {'db': splitted_url.scheme, 'nodelist': connection_options['nodelist']} LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data) client = self._mongo_connect(url) self._pool[pool_key] = weakref.ref(client) return client @staticmethod def _mongo_connect(url): try: return MongoProxy(pymongo.MongoClient(url)) except pymongo.errors.ConnectionFailure as e: LOG.warning(_('Unable to connect to the database server: ' '%(errmsg)s.') % {'errmsg': e}) raise class QueryTransformer(object): operators = {"<": "$lt", ">": "$gt", "<=": "$lte", "=<": "$lte", ">=": "$gte", "=>": "$gte", "!=": "$ne", "in": "$in", "=~": "$regex"} complex_operators = {"or": "$or", "and": "$and"} ordering_functions = {"asc": pymongo.ASCENDING, "desc": pymongo.DESCENDING} def transform_orderby(self, orderby): orderby_filter = [] for field in orderby: field_name = list(field.keys())[0] ordering = self.ordering_functions[list(field.values())[0]] orderby_filter.append((field_name, ordering)) return orderby_filter @staticmethod def _move_negation_to_leaf(condition): """Moves every not operator to the leafs. Moving is going by applying the De Morgan rules and annihilating double negations. 
""" def _apply_de_morgan(tree, negated_subtree, negated_op): if negated_op == "and": new_op = "or" else: new_op = "and" tree[new_op] = [{"not": child} for child in negated_subtree[negated_op]] del tree["not"] def transform(subtree): op = list(subtree.keys())[0] if op in ["and", "or"]: [transform(child) for child in subtree[op]] elif op == "not": negated_tree = subtree[op] negated_op = list(negated_tree.keys())[0] if negated_op == "and": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "or": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "not": # two consecutive not annihilates themselves value = list(negated_tree.values())[0] new_op = list(value.keys())[0] subtree[new_op] = negated_tree[negated_op][new_op] del subtree["not"] transform(subtree) transform(condition) def transform_filter(self, condition): # in Mongo not operator can only be applied to # simple expressions so we have to move every # not operator to the leafs of the expression tree self._move_negation_to_leaf(condition) return self._process_json_tree(condition) def _handle_complex_op(self, complex_op, nodes): element_list = [] for node in nodes: element = self._process_json_tree(node) element_list.append(element) complex_operator = self.complex_operators[complex_op] op = {complex_operator: element_list} return op def _handle_not_op(self, negated_tree): # assumes that not is moved to the leaf already # so we are next to a leaf negated_op = list(negated_tree.keys())[0] negated_field = list(negated_tree[negated_op].keys())[0] value = negated_tree[negated_op][negated_field] if negated_op == "=": return {negated_field: {"$ne": value}} elif negated_op == "!=": return {negated_field: value} else: return {negated_field: {"$not": {self.operators[negated_op]: value}}} def _handle_simple_op(self, simple_op, nodes): field_name = list(nodes.keys())[0] field_value = list(nodes.values())[0] # no operator for equal in Mongo if simple_op == "=": op = {field_name: field_value} return op operator = self.operators[simple_op] op = {field_name: {operator: field_value}} return op def _process_json_tree(self, condition_tree): operator_node = list(condition_tree.keys())[0] nodes = list(condition_tree.values())[0] if operator_node in self.complex_operators: return self._handle_complex_op(operator_node, nodes) if operator_node == "not": negated_tree = condition_tree[operator_node] return self._handle_not_op(negated_tree) return self._handle_simple_op(operator_node, nodes) def safe_mongo_call(call): def closure(*args, **kwargs): # NOTE(idegtiarov) options max_retries and retry_interval have been # registered in storage.__init__ in oslo_db.options.set_defaults # default values for both options are 10. max_retries = cfg.CONF.database.max_retries retry_interval = cfg.CONF.database.retry_interval attempts = 0 while True: try: return call(*args, **kwargs) except pymongo.errors.AutoReconnect as err: if 0 <= max_retries <= attempts: LOG.error(_('Unable to reconnect to the primary mongodb ' 'after %(retries)d retries. Giving up.') % {'retries': max_retries}) raise LOG.warning(_('Unable to reconnect to the primary ' 'mongodb: %(errmsg)s. 
Trying again in '
                              '%(retry_interval)d seconds.') %
                            {'errmsg': err,
                             'retry_interval': retry_interval})
                attempts += 1
                time.sleep(retry_interval)
    return closure


class MongoConn(object):
    def __init__(self, method):
        self.method = method

    @safe_mongo_call
    def __call__(self, *args, **kwargs):
        return self.method(*args, **kwargs)


MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection)
                     if not typ.startswith('_')])
MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient)
                          if not typ.startswith('_')]))
MONGO_METHODS.update(set([typ for typ in dir(pymongo)
                          if not typ.startswith('_')]))


class MongoProxy(object):
    def __init__(self, conn):
        self.conn = conn

    def __getitem__(self, item):
        """Create and return a proxy around the method in the connection.

        :param item: name of the connection
        """
        return MongoProxy(self.conn[item])

    def find(self, *args, **kwargs):
        # We need this modifying method to return a CursorProxy object so that
        # we can handle the Cursor next function to catch the AutoReconnect
        # exception.
        return CursorProxy(self.conn.find(*args, **kwargs))

    def create_index(self, keys, name=None, *args, **kwargs):
        try:
            self.conn.create_index(keys, name=name, *args, **kwargs)
        except pymongo.errors.OperationFailure as e:
            if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
                LOG.info(_LI("Index %s will be recreated.") % name)
                self._recreate_index(keys, name, *args, **kwargs)

    @safe_mongo_call
    def _recreate_index(self, keys, name, *args, **kwargs):
        self.conn.drop_index(name)
        self.conn.create_index(keys, name=name, *args, **kwargs)

    def __getattr__(self, item):
        """Wrap MongoDB connection.

        If item is the name of an executable method, for example find or
        insert, wrap this method in the MongoConn.
        Else wrap getting attribute with MongoProxy.
        """
        if item in ('name', 'database'):
            return getattr(self.conn, item)
        if item in MONGO_METHODS:
            return MongoConn(getattr(self.conn, item))
        return MongoProxy(getattr(self.conn, item))

    def __call__(self, *args, **kwargs):
        return self.conn(*args, **kwargs)


class CursorProxy(pymongo.cursor.Cursor):
    def __init__(self, cursor):
        self.cursor = cursor

    def __getitem__(self, item):
        return self.cursor[item]

    @safe_mongo_call
    def next(self):
        """Wrap Cursor next method.

        This method will be executed before each Cursor next method call.
""" try: save_cursor = self.cursor.clone() return self.cursor.next() except pymongo.errors.AutoReconnect: self.cursor = save_cursor raise def __getattr__(self, item): return getattr(self.cursor, item) class AggregationFields(object): def __init__(self, version, group, project, finalize=None, parametrized=False, validate=None): self._finalize = finalize or FINALIZE_FLOAT_LAMBDA self.group = lambda *args: group(*args) if parametrized else group self.project = (lambda *args: project(*args) if parametrized else project) self.version = version self.validate = validate or (lambda name, param: True) def finalize(self, name, data, param=None): field = ("%s" % name) + ("/%s" % param if param else "") return {field: (self._finalize(data.get(field)) if self._finalize else data.get(field))} class Aggregation(object): def __init__(self, name, aggregation_fields): self.name = name aggregation_fields = (aggregation_fields if isinstance(aggregation_fields, list) else [aggregation_fields]) self.aggregation_fields = sorted(aggregation_fields, key=lambda af: getattr(af, "version"), reverse=True) def _get_compatible_aggregation_field(self, version_array): if version_array: version_array = version_array[0:2] else: version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION for aggregation_field in self.aggregation_fields: if version_array >= aggregation_field.version: return aggregation_field def group(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.group(param) def project(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.project(param) def finalize(self, data, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.finalize(self.name, data, param) def validate(self, param=None, version_array=None): af = self._get_compatible_aggregation_field(version_array) return af.validate(self.name, param) SUM_AGGREGATION = Aggregation( "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"sum": {"$sum": "$counter_volume"}}, {"sum": "$sum"}, )) AVG_AGGREGATION = Aggregation( "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"avg": {"$avg": "$counter_volume"}}, {"avg": "$avg"}, )) MIN_AGGREGATION = Aggregation( "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"min": {"$min": "$counter_volume"}}, {"min": "$min"}, )) MAX_AGGREGATION = Aggregation( "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"max": {"$max": "$counter_volume"}}, {"max": "$max"}, )) COUNT_AGGREGATION = Aggregation( "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"count": {"$sum": 1}}, {"count": "$count"}, FINALIZE_INT_LAMBDA)) STDDEV_AGGREGATION = Aggregation( "stddev", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, {"std_square": { "$sum": { "$multiply": ["$counter_volume", "$counter_volume"] }}, "std_count": {"$sum": 1}, "std_sum": {"$sum": "$counter_volume"}}, {"stddev": { "count": "$std_count", "sum": "$std_sum", "square_sum": "$std_square"}}, lambda stddev: ((stddev['square_sum'] * stddev['count'] - stddev["sum"] ** 2) ** 0.5 / stddev['count']))) CARDINALITY_AGGREGATION = Aggregation( "cardinality", # $cond operator available only in MongoDB 2.6+ [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION, lambda field: ({"cardinality/%s" % field: {"$addToSet": "$%s" % field}}), lambda field: { "cardinality/%s" % field: { "$cond": [ {"$eq": ["$cardinality/%s" % field, None]}, 0, {"$size": "$cardinality/%s" % field}] }}, 
validate=CARDINALITY_VALIDATION, parametrized=True), AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, lambda field: ({"cardinality/%s" % field: {"$addToSet": "$%s" % field}}), lambda field: ({"cardinality/%s" % field: "$cardinality/%s" % field}), finalize=len, validate=CARDINALITY_VALIDATION, parametrized=True)] ) def to_unix_timestamp(timestamp): if isinstance(timestamp, datetime.datetime): return int(time.mktime(timestamp.timetuple())) return timestamp def from_unix_timestamp(timestamp): if (isinstance(timestamp, six.integer_types) or isinstance(timestamp, float)): return datetime.datetime.fromtimestamp(timestamp) return timestamp ceilometer-6.0.0/ceilometer/storage/mongo/__init__.py0000664000567000056710000000000012701406223024015 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/base.py0000664000567000056710000002140312701406224022064 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for storage engines """ import datetime import inspect import math from oslo_utils import timeutils import six from six import moves import ceilometer def iter_period(start, end, period): """Split a time from start to end in periods of a number of seconds. This function yields the (start, end) time for each period composing the time passed as argument. :param start: When the period set start. :param end: When the period end starts. :param period: The duration of the period. """ period_start = start increment = datetime.timedelta(seconds=period) for i in moves.xrange(int(math.ceil( timeutils.delta_seconds(start, end) / float(period)))): next_start = period_start + increment yield (period_start, next_start) period_start = next_start def _handle_sort_key(model_name, sort_key=None): """Generate sort keys according to the passed in sort key from user. :param model_name: Database model name be query.(meter, etc.) :param sort_key: sort key passed from user. return: sort keys list """ sort_keys_extra = {'meter': ['user_id', 'project_id'], 'resource': ['user_id', 'project_id', 'timestamp'], } sort_keys = sort_keys_extra[model_name] if not sort_key: return sort_keys # NOTE(Fengqian): We need to put the sort key from user # in the first place of sort keys list. 
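    # A quick sketch of the resulting ordering (hypothetical calls):
    #
    #     >>> _handle_sort_key('meter', 'project_id')
    #     ['project_id', 'user_id']
    #     >>> _handle_sort_key('meter')
    #     ['user_id', 'project_id']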
try: sort_keys.remove(sort_key) except ValueError: pass finally: sort_keys.insert(0, sort_key) return sort_keys class MultipleResultsFound(Exception): pass class NoResultFound(Exception): pass class Model(object): """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in six.iteritems(kwds): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() @classmethod def get_field_names(cls): fields = inspect.getargspec(cls.__init__)[0] return set(fields) - set(["self"]) class Connection(object): """Base class for storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'meters': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'resources': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'samples': {'query': {'simple': False, 'metadata': False, 'complex': False}}, 'statistics': {'groupby': False, 'query': {'simple': False, 'metadata': False, 'complex': False}, 'aggregation': {'standard': False, 'selectable': { 'max': False, 'min': False, 'sum': False, 'avg': False, 'count': False, 'stddev': False, 'cardinality': False}} }, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, url): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def record_metering_data(data): """Write the data to the backend storage system. :param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter All timestamps must be naive utc datetime object. """ raise ceilometer.NotImplementedError( 'Recording metering data is not implemented') @staticmethod def clear_expired_metering_data(ttl): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. """ raise ceilometer.NotImplementedError( 'Clearing samples not implemented') @staticmethod def get_resources(user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances. Iterable items containing resource information. :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional timestamp start range operation. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional timestamp end range operation. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ raise ceilometer.NotImplementedError('Resources not implemented') @staticmethod def get_meters(user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of model.Meter instances. Iterable items containing meter information. :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. 
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        :param limit: Maximum number of results to return.
        :param unique: If set to true, return only unique meter information.
        """
        raise ceilometer.NotImplementedError('Meters not implemented')

    @staticmethod
    def get_samples(sample_filter, limit=None):
        """Return an iterable of model.Sample instances.

        :param sample_filter: Filter.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Samples not implemented')

    @staticmethod
    def get_meter_statistics(sample_filter, period=None, groupby=None,
                             aggregate=None):
        """Return an iterable of model.Statistics instances.

        The filter must have a meter value set.
        """
        raise ceilometer.NotImplementedError('Statistics not implemented')

    @staticmethod
    def clear():
        """Clear database."""

    @staticmethod
    def query_samples(filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.Sample objects.

        :param filter_expr: Filter expression for query.
        :param orderby: List of field name and direction pairs for order by.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Complex query for samples '
                                             'is not implemented.')

    @classmethod
    def get_capabilities(cls):
        """Return a dictionary with the capabilities of each driver."""
        return cls.CAPABILITIES

    @classmethod
    def get_storage_capabilities(cls):
        """Return a dictionary representing the performance capabilities.

        This is needed to evaluate the performance of each driver.
        """
        return cls.STORAGE_CAPABILITIES
ceilometer-6.0.0/ceilometer/storage/impl_db2.py0000664000567000056710000004302212701406224022643 0ustar jenkinsjenkins00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
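# NOTE: this driver is addressed through a 'db2://' connection URL, e.g.
# (hypothetical) db2://user:password@host:27017/ceilometer; __init__ below
# rewrites the scheme to 'mongodb:' so that pymongo accepts the URL.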
"""DB2 storage backend """ from __future__ import division import copy import datetime import itertools import sys import bson.code import bson.objectid from oslo_config import cfg from oslo_utils import timeutils import pymongo import six import ceilometer from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.storage import pymongo_base from ceilometer import utils AVAILABLE_CAPABILITIES = { 'resources': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True}} } class Connection(pymongo_base.Connection): """The db2 storage for Ceilometer Collections:: - meter - the raw incoming data - resource - the metadata for resources - { _id: uuid of resource, metadata: metadata dictionaries user_id: uuid project_id: uuid meter: [ array of {counter_name: string, counter_type: string, counter_unit: string} ] } """ CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) CONNECTION_POOL = pymongo_utils.ConnectionPool() GROUP = {'_id': '$counter_name', 'unit': {'$min': '$counter_unit'}, 'min': {'$min': '$counter_volume'}, 'max': {'$max': '$counter_volume'}, 'sum': {'$sum': '$counter_volume'}, 'count': {'$sum': 1}, 'duration_start': {'$min': '$timestamp'}, 'duration_end': {'$max': '$timestamp'}, } PROJECT = {'_id': 0, 'unit': 1, 'min': 1, 'max': 1, 'sum': 1, 'count': 1, 'avg': {'$divide': ['$sum', '$count']}, 'duration_start': 1, 'duration_end': 1, } SORT_OPERATION_MAP = {'desc': pymongo.DESCENDING, 'asc': pymongo.ASCENDING} SECONDS_IN_A_DAY = 86400 def __init__(self, url): # Since we are using pymongo, even though we are connecting to DB2 # we still have to make sure that the scheme which used to distinguish # db2 driver from mongodb driver be replaced so that pymongo will not # produce an exception on the scheme. url = url.replace('db2:', 'mongodb:', 1) self.conn = self.CONNECTION_POOL.connect(url) # Require MongoDB 2.2 to use aggregate(), since we are using mongodb # as backend for test, the following code is necessary to make sure # that the test wont try aggregate on older mongodb during the test. # For db2, the versionArray won't be part of the server_info, so there # will not be exception when real db2 gets used as backend. server_info = self.conn.server_info() if server_info.get('sysInfo'): self._using_mongodb = True else: self._using_mongodb = False if self._using_mongodb and server_info.get('versionArray') < [2, 2]: raise storage.StorageBadVersion("Need at least MongoDB 2.2") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) self.upgrade() @classmethod def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): """Returns a sort_instruction. Sort instructions are used in the query to determine what attributes to sort on and what direction to use. :param q: The query dict passed in. :param sort_keys: array of attributes by which results be sorted. :param sort_dir: direction in which results be sorted (asc, desc). 
        :return: sort parameters
        """
        sort_keys = sort_keys or []
        sort_instructions = []
        _sort_dir = cls.SORT_OPERATION_MAP.get(
            sort_dir, cls.SORT_OPERATION_MAP['desc'])

        for _sort_key in sort_keys:
            _instruction = (_sort_key, _sort_dir)
            sort_instructions.append(_instruction)
        return sort_instructions

    def _generate_random_str(self, str_len):
        init_str = str(bson.objectid.ObjectId())
        objectid_len = len(init_str)
        if str_len >= objectid_len:
            init_str = (init_str * int(str_len / objectid_len) +
                        'x' * int(str_len % objectid_len))
        return init_str

    def upgrade(self, version=None):
        # create collection if not present
        if 'resource' not in self.db.conn.collection_names():
            self.db.conn.create_collection('resource')
        if 'meter' not in self.db.conn.collection_names():
            self.db.conn.create_collection('meter')
        # Establish indexes
        #
        # We need variations for user_id vs. project_id because of the
        # way the indexes are stored in b-trees. The user_id and
        # project_id values are usually mutually exclusive in the
        # queries, so the database won't take advantage of an index
        # including both.
        if self.db.resource.index_information() == {}:
            # Initializing a longer resource id to workaround a DB2 nosql
            # issue. A longer resource id is required by the compute node's
            # resource, as its id has the form '<hostname>_<nodename>'.
            # DB2 creates a VARCHAR(70) for the resource id when its length
            # is < 70, but DB2 can create a VARCHAR(n) for a resource id
            # which has n (n > 70) characters. Users can adjust
            # 'db2nosql_resource_id_maxlen' (default is 512) for their ENV.
            resource_id = self._generate_random_str(
                cfg.CONF.database.db2nosql_resource_id_maxlen)
            self.db.resource.insert_one({'_id': resource_id,
                                         'no_key': resource_id})
            meter_id = str(bson.objectid.ObjectId())
            timestamp = timeutils.utcnow()
            self.db.meter.insert_one({'_id': meter_id,
                                      'no_key': meter_id,
                                      'timestamp': timestamp})

            self.db.resource.create_index([
                ('user_id', pymongo.ASCENDING),
                ('project_id', pymongo.ASCENDING),
                ('source', pymongo.ASCENDING)], name='resource_idx')

            self.db.meter.create_index([
                ('resource_id', pymongo.ASCENDING),
                ('user_id', pymongo.ASCENDING),
                ('project_id', pymongo.ASCENDING),
                ('counter_name', pymongo.ASCENDING),
                ('timestamp', pymongo.ASCENDING),
                ('source', pymongo.ASCENDING)], name='meter_idx')

            self.db.meter.create_index([('timestamp', pymongo.DESCENDING)],
                                       name='timestamp_idx')

            self.db.resource.remove({'_id': resource_id})
            self.db.meter.remove({'_id': meter_id})

    def clear(self):
        # db2 does not support drop_database, remove all collections
        for col in ['resource', 'meter']:
            self.db[col].drop()
        # drop_database command does nothing on db2 database since this has
        # not been implemented. However calling this method is important for
        # removal of all the empty dbs created during the test runs since
        # test run is against mongodb on Jenkins
        self.conn.drop_database(self.db.name)
        self.conn.close()

    def record_metering_data(self, data):
        """Write the data to the backend storage system.
:param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ # Record the updated resource metadata data = copy.deepcopy(data) data['resource_metadata'] = pymongo_utils.improve_keys( data.pop('resource_metadata')) self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'] or 'null', 'metadata': data['resource_metadata'], 'source': data['source'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], 'counter_unit': data['counter_unit'], }, }, }, upsert=True, ) # Record the raw data for the meter. Use a copy so we do not # modify a data structure owned by our caller (the driver adds # a new key '_id'). record = copy.copy(data) record['recorded_at'] = timeutils.utcnow() # Make sure that the data does have field _id which db2 wont add # automatically. if record.get('_id') is None: record['_id'] = str(bson.objectid.ObjectId()) self.db.meter.insert_one(record) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge. :param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} q = {} if user is not None: q['user_id'] = user if project is not None: q['project_id'] = project if source is not None: q['source'] = source if resource is not None: q['resource_id'] = resource # Add resource_ prefix so it matches the field in the db q.update(dict(('resource_' + k, v) for (k, v) in six.iteritems(metaquery))) if start_timestamp or end_timestamp: # Look for resources matching the above criteria and with # samples in the time range we care about, then change the # resource query to return just those resources by id. ts_range = pymongo_utils.make_timestamp_range(start_timestamp, end_timestamp, start_timestamp_op, end_timestamp_op) if ts_range: q['timestamp'] = ts_range sort_keys = base._handle_sort_key('resource', 'timestamp') sort_keys.insert(0, 'resource_id') sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, sort_dir='desc') resource = lambda x: x['resource_id'] if limit is not None: meters = self.db.meter.find(q, sort=sort_instructions, limit=limit) else: meters = self.db.meter.find(q, sort=sort_instructions) for resource_id, r_meters in itertools.groupby(meters, key=resource): # Because we have to know first/last timestamp, and we need a full # list of references to the resource's meters, we need a tuple # here. 
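            # With the descending (resource_id, timestamp, ...) sort order
            # built above, r_meters[0] is the newest sample for the resource
            # and r_meters[-1] the oldest, which is how last_ts/first_ts are
            # derived below.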
r_meters = tuple(r_meters) latest_meter = r_meters[0] last_ts = latest_meter['timestamp'] first_ts = r_meters[-1]['timestamp'] yield models.Resource(resource_id=latest_meter['resource_id'], project_id=latest_meter['project_id'], first_sample_timestamp=first_ts, last_sample_timestamp=last_ts, source=latest_meter['source'], user_id=latest_meter['user_id'], metadata=pymongo_utils.unquote_keys( latest_meter['resource_metadata'])) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instance. Items are containing meter statistics described by the query parameters. The filter must have a meter value set. """ if (groupby and set(groupby) - set(['user_id', 'project_id', 'resource_id', 'source'])): raise ceilometer.NotImplementedError( "Unable to group by these fields") if aggregate: raise ceilometer.NotImplementedError( 'Selectable aggregates not implemented') q = pymongo_utils.make_query_from_filter(sample_filter) if period: if sample_filter.start_timestamp: period_start = sample_filter.start_timestamp else: period_start = self.db.meter.find( limit=1, sort=[('timestamp', pymongo.ASCENDING)])[0]['timestamp'] if groupby: sort_keys = ['counter_name'] + groupby + ['timestamp'] else: sort_keys = ['counter_name', 'timestamp'] sort_instructions = self._build_sort_instructions(sort_keys=sort_keys, sort_dir='asc') meters = self.db.meter.find(q, sort=sort_instructions) def _group_key(meter): # the method to define a key for groupby call key = {} for y in sort_keys: if y == 'timestamp' and period: key[y] = (timeutils.delta_seconds(period_start, meter[y]) // period) elif y != 'timestamp': key[y] = meter[y] return key def _to_offset(periods): return {'days': (periods * period) // self.SECONDS_IN_A_DAY, 'seconds': (periods * period) % self.SECONDS_IN_A_DAY} for key, grouped_meters in itertools.groupby(meters, key=_group_key): stat = models.Statistics(unit=None, min=sys.maxsize, max=-sys.maxsize, avg=0, sum=0, count=0, period=0, period_start=0, period_end=0, duration=0, duration_start=0, duration_end=0, groupby=None) for meter in grouped_meters: stat.unit = meter.get('counter_unit', '') m_volume = meter.get('counter_volume') if stat.min > m_volume: stat.min = m_volume if stat.max < m_volume: stat.max = m_volume stat.sum += m_volume stat.count += 1 if stat.duration_start == 0: stat.duration_start = meter['timestamp'] stat.duration_end = meter['timestamp'] if groupby and not stat.groupby: stat.groupby = {} for group_key in groupby: stat.groupby[group_key] = meter[group_key] stat.duration = timeutils.delta_seconds(stat.duration_start, stat.duration_end) stat.avg = stat.sum / stat.count if period: stat.period = period periods = key.get('timestamp') stat.period_start = (period_start + datetime. timedelta(**(_to_offset(periods)))) stat.period_end = (period_start + datetime. timedelta(**(_to_offset(periods + 1)))) else: stat.period_start = stat.duration_start stat.period_end = stat.duration_end yield stat ceilometer-6.0.0/ceilometer/storage/impl_hbase.py0000664000567000056710000004500612701406224023262 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import operator import time from oslo_log import log from oslo_utils import timeutils import ceilometer from ceilometer.storage import base from ceilometer.storage.hbase import base as hbase_base from ceilometer.storage.hbase import migration as hbase_migration from ceilometer.storage.hbase import utils as hbase_utils from ceilometer.storage import models from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'resources': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the metering data into a HBase database Collections: - meter (describes sample actually): - row-key: consists of reversed timestamp, meter and a message uuid for purposes of uniqueness - Column Families: f: contains the following qualifiers: - counter_name: - counter_type: - counter_unit: - counter_volume: - message: - message_id: - message_signature: - resource_metadata: raw metadata for corresponding resource of the meter - project_id: - resource_id: - user_id: - recorded_at: - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - rts: - timestamp: - source for meter with prefix 's' - resource: - row_key: uuid of resource - Column Families: f: contains the following qualifiers: - resource_metadata: raw metadata for corresponding resource - project_id: - resource_id: - user_id: - flattened metadata with prefix r_metadata. e.g.:: f:r_metadata.display_name or f:r_metadata.tag - sources for all corresponding meters with prefix 's' - all meters with prefix 'm' for this resource in format: .. code-block:: python "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type, counter_unit) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None RESOURCE_TABLE = "resource" METER_TABLE = "meter" def __init__(self, url): super(Connection, self).__init__(url) def upgrade(self): tables = [self.RESOURCE_TABLE, self.METER_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) hbase_migration.migrate_tables(conn, tables) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.RESOURCE_TABLE, self.METER_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_metering_data(self, data): """Write the data to the backend storage system. 
        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        with self.conn_pool.connection() as conn:
            resource_table = conn.table(self.RESOURCE_TABLE)
            meter_table = conn.table(self.METER_TABLE)

            resource_metadata = data.get('resource_metadata', {})
            # Determine the name of new meter
            rts = hbase_utils.timestamp(data['timestamp'])
            new_meter = hbase_utils.prepare_key(
                rts, data['source'], data['counter_name'],
                data['counter_type'], data['counter_unit'])

            # TODO(nprivalova): try not to store resource_id
            resource = hbase_utils.serialize_entry(**{
                'source': data['source'],
                'meter': {new_meter: data['timestamp']},
                'resource_metadata': resource_metadata,
                'resource_id': data['resource_id'],
                'project_id': data['project_id'],
                'user_id': data['user_id']})
            # Here we put the entry into HBase with our own timestamp. This is
            # needed when samples arrive out-of-order.
            # If we used timestamp=data['timestamp'], the newest data would
            # automatically be 'on the top'. That is needed to keep metadata
            # up-to-date: metadata from the newest samples is considered the
            # current metadata.
            ts = int(time.mktime(data['timestamp'].timetuple()) * 1000)
            resource_table.put(hbase_utils.encode_unicode(
                data['resource_id']), resource, ts)

            # Rowkey consists of reversed timestamp, meter and a
            # message uuid for purposes of uniqueness
            row = hbase_utils.prepare_key(data['counter_name'], rts,
                                          data['message_id'])
            record = hbase_utils.serialize_entry(
                data, **{'source': data['source'], 'rts': rts,
                         'message': data, 'recorded_at': timeutils.utcnow()})
            meter_table.put(row, record)

    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, limit=None):
        """Return an iterable of models.Resource instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like ge, gt.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param limit: Maximum number of results to return.
        """
        if limit == 0:
            return
        q = hbase_utils.make_query(metaquery=metaquery, user_id=user,
                                   project_id=project,
                                   resource_id=resource, source=source)
        q = hbase_utils.make_meter_query_for_resource(start_timestamp,
                                                      start_timestamp_op,
                                                      end_timestamp,
                                                      end_timestamp_op,
                                                      source, q)
        with self.conn_pool.connection() as conn:
            resource_table = conn.table(self.RESOURCE_TABLE)
            LOG.debug("Query Resource table: %s", q)
            for resource_id, data in resource_table.scan(filter=q,
                                                         limit=limit):
                f_res, meters, md = hbase_utils.deserialize_entry(
                    data)
                resource_id = hbase_utils.encode_unicode(resource_id)
                # Unfortunately happybase doesn't keep the result ordered as
                # returned from HBase, so we have to find the min and max
                # timestamps manually.
                first_ts = min(meters, key=operator.itemgetter(1))[1]
                last_ts = max(meters, key=operator.itemgetter(1))[1]
                source = meters[0][0][1]
                # If we use QualifierFilter then HBase returns only the
                # qualifiers filtered by; it will not return the whole entry.
                # That's why we need to fetch the additional qualifiers
                # manually.
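                # e.g. a scan filtered on source only carries the matching
                # qualifiers back, so the row() call below re-reads
                # f:project_id, f:user_id and f:resource_metadata explicitly
                # whenever they are missing from the scan result.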
if 'project_id' not in f_res and 'user_id' not in f_res: row = resource_table.row( resource_id, columns=['f:project_id', 'f:user_id', 'f:resource_metadata']) f_res, _m, md = hbase_utils.deserialize_entry(row) yield models.Resource( resource_id=resource_id, first_sample_timestamp=first_ts, last_sample_timestamp=last_ts, project_id=f_res['project_id'], source=source, user_id=f_res['user_id'], metadata=md) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. """ if limit == 0: return metaquery = metaquery or {} with self.conn_pool.connection() as conn: resource_table = conn.table(self.RESOURCE_TABLE) q = hbase_utils.make_query(metaquery=metaquery, user_id=user, project_id=project, resource_id=resource, source=source) LOG.debug("Query Resource table: %s", q) gen = resource_table.scan(filter=q) # We need result set to be sure that user doesn't receive several # same meters. Please see bug # https://bugs.launchpad.net/ceilometer/+bug/1301371 result = set() for ignored, data in gen: flatten_result, meters, md = hbase_utils.deserialize_entry( data) for m in meters: if limit and len(result) >= limit: return _m_rts, m_source, name, m_type, unit = m[0] if unique: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': None, 'project_id': None, 'user_id': None, 'source': None} else: meter_dict = {'name': name, 'type': m_type, 'unit': unit, 'resource_id': flatten_result['resource_id'], 'project_id': flatten_result['project_id'], 'user_id': flatten_result['user_id']} frozen_meter = frozenset(meter_dict.items()) if frozen_meter in result: continue result.add(frozen_meter) if not unique: meter_dict.update({'source': m_source if m_source else None}) yield models.Meter(**meter_dict) def get_samples(self, sample_filter, limit=None): """Return an iterable of models.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. """ if limit == 0: return with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. 
make_sample_query_from_filter (sample_filter, require_meter=False)) LOG.debug("Query Meter Table: %s", q) gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit, columns=columns) for ignored, meter in gen: d_meter = hbase_utils.deserialize_entry(meter)[0] d_meter['message']['counter_volume'] = ( float(d_meter['message']['counter_volume'])) d_meter['message']['recorded_at'] = d_meter['recorded_at'] yield models.Sample(**d_meter['message']) @staticmethod def _update_meter_stats(stat, meter): """Do the stats calculation on a requested time bucket in stats dict :param stats: dict where aggregated stats are kept :param index: time bucket index in stats :param meter: meter record as returned from HBase :param start_time: query start time :param period: length of the time bucket """ vol = meter['counter_volume'] ts = meter['timestamp'] stat.unit = meter['counter_unit'] stat.min = min(vol, stat.min or vol) stat.max = max(vol, stat.max) stat.sum = vol + (stat.sum or 0) stat.count += 1 stat.avg = (stat.sum / float(stat.count)) stat.duration_start = min(ts, stat.duration_start or ts) stat.duration_end = max(ts, stat.duration_end or ts) stat.duration = (timeutils.delta_seconds(stat.duration_start, stat.duration_end)) def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instances. Items are containing meter statistics described by the query parameters. The filter must have a meter value set. .. note:: Due to HBase limitations the aggregations are implemented in the driver itself, therefore this method will be quite slow because of all the Thrift traffic it is going to create. """ if groupby: raise ceilometer.NotImplementedError("Group by not implemented.") if aggregate: raise ceilometer.NotImplementedError( 'Selectable aggregates not implemented') with self.conn_pool.connection() as conn: meter_table = conn.table(self.METER_TABLE) q, start, stop, columns = (hbase_utils. 
make_sample_query_from_filter (sample_filter)) # These fields are used in statistics' calculating columns.extend(['f:timestamp', 'f:counter_volume', 'f:counter_unit']) meters = map(hbase_utils.deserialize_entry, list(meter for (ignored, meter) in meter_table.scan( filter=q, row_start=start, row_stop=stop, columns=columns))) if sample_filter.start_timestamp: start_time = sample_filter.start_timestamp elif meters: start_time = meters[-1][0]['timestamp'] else: start_time = None if sample_filter.end_timestamp: end_time = sample_filter.end_timestamp elif meters: end_time = meters[0][0]['timestamp'] else: end_time = None results = [] if not period: period = 0 period_start = start_time period_end = end_time # As our HBase meters are stored as newest-first, we need to iterate # in the reverse order for meter in meters[::-1]: ts = meter[0]['timestamp'] if period: offset = int(timeutils.delta_seconds( start_time, ts) / period) * period period_start = start_time + datetime.timedelta(0, offset) if not results or not results[-1].period_start == period_start: if period: period_end = period_start + datetime.timedelta( 0, period) results.append( models.Statistics(unit='', count=0, min=0, max=0, avg=0, sum=0, period=period, period_start=period_start, period_end=period_end, duration=None, duration_start=None, duration_end=None, groupby=None) ) self._update_meter_stats(results[-1], meter[0]) return results ceilometer-6.0.0/ceilometer/storage/impl_mongodb.py0000664000567000056710000007027712701406224023635 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2014 Red Hat, Inc # # Authors: Doug Hellmann # Julien Danjou # Eoghan Glynn # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
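# NOTE: a typical (hypothetical) metering connection URL handled by this
# driver would be mongodb://user:password@host:27017/ceilometer; the
# Connection class below parses it with pymongo.uri_parser.parse_uri().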
"""MongoDB storage backend""" import copy import datetime import uuid import bson.code import bson.objectid from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pymongo import six import ceilometer from ceilometer import storage from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer.storage import pymongo_base from ceilometer import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'resources': {'query': {'simple': True, 'metadata': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True}, 'aggregation': {'standard': True, 'selectable': {'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True}}} } class Connection(pymongo_base.Connection): """Put the data into a MongoDB database Collections:: - meter - the raw incoming data - resource - the metadata for resources - { _id: uuid of resource, metadata: metadata dictionaries user_id: uuid project_id: uuid meter: [ array of {counter_name: string, counter_type: string, counter_unit: string} ] } """ CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) CONNECTION_POOL = pymongo_utils.ConnectionPool() STANDARD_AGGREGATES = dict([(a.name, a) for a in [ pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, pymongo_utils.COUNT_AGGREGATION, ]]) AGGREGATES = dict([(a.name, a) for a in [ pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, pymongo_utils.COUNT_AGGREGATION, pymongo_utils.STDDEV_AGGREGATION, pymongo_utils.CARDINALITY_AGGREGATION, ]]) SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'), 'asc': (pymongo.ASCENDING, '$gt')} MAP_RESOURCES = bson.code.Code(""" function () { emit(this.resource_id, {user_id: this.user_id, project_id: this.project_id, source: this.source, first_timestamp: this.timestamp, last_timestamp: this.timestamp, metadata: this.resource_metadata}) }""") REDUCE_RESOURCES = bson.code.Code(""" function (key, values) { var merge = {user_id: values[0].user_id, project_id: values[0].project_id, source: values[0].source, first_timestamp: values[0].first_timestamp, last_timestamp: values[0].last_timestamp, metadata: values[0].metadata} values.forEach(function(value) { if (merge.first_timestamp - value.first_timestamp > 0) { merge.first_timestamp = value.first_timestamp; merge.user_id = value.user_id; merge.project_id = value.project_id; merge.source = value.source; } else if (merge.last_timestamp - value.last_timestamp <= 0) { merge.last_timestamp = value.last_timestamp; merge.metadata = value.metadata; } }); return merge; }""") _GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1) _APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31, hour=23, minute=59, second=59) def __init__(self, url): # NOTE(jd) Use our own connection pooling on top of the Pymongo one. # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. 
self.conn = self.CONNECTION_POOL.connect(url) self.version = self.conn.server_info()['versionArray'] # Require MongoDB 2.4 to use $setOnInsert if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION: raise storage.StorageBadVersion( "Need at least MongoDB %s" % pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION) connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() @staticmethod def update_ttl(ttl, ttl_index_name, index_field, coll): """Update or create time_to_live indexes. :param ttl: time to live in seconds. :param ttl_index_name: name of the index we want to update or create. :param index_field: field with the index that we need to update. :param coll: collection which indexes need to be updated. """ indexes = coll.index_information() if ttl <= 0: if ttl_index_name in indexes: coll.drop_index(ttl_index_name) return if ttl_index_name in indexes: return coll.database.command( 'collMod', coll.name, index={'keyPattern': {index_field: pymongo.ASCENDING}, 'expireAfterSeconds': ttl}) coll.create_index([(index_field, pymongo.ASCENDING)], expireAfterSeconds=ttl, name=ttl_index_name) def upgrade(self): # Establish indexes # # We need variations for user_id vs. project_id because of the # way the indexes are stored in b-trees. The user_id and # project_id values are usually mutually exclusive in the # queries, so the database won't take advantage of an index # including both. # create collection if not present if 'resource' not in self.db.conn.collection_names(): self.db.conn.create_collection('resource') if 'meter' not in self.db.conn.collection_names(): self.db.conn.create_collection('meter') name_qualifier = dict(user_id='', project_id='project_') background = dict(user_id=False, project_id=True) for primary in ['user_id', 'project_id']: name = 'meter_%sidx' % name_qualifier[primary] self.db.meter.create_index([ ('resource_id', pymongo.ASCENDING), (primary, pymongo.ASCENDING), ('counter_name', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING), ], name=name, background=background[primary]) self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], name='timestamp_idx') # NOTE(ityaptin) This index covers get_resource requests sorting # and MongoDB uses part of this compound index for different # queries based on any of user_id, project_id, last_sample_timestamp # fields self.db.resource.create_index([('user_id', pymongo.DESCENDING), ('project_id', pymongo.DESCENDING), ('last_sample_timestamp', pymongo.DESCENDING)], name='resource_user_project_timestamp',) self.db.resource.create_index([('last_sample_timestamp', pymongo.DESCENDING)], name='last_sample_timestamp_idx') # update or create time_to_live index ttl = cfg.CONF.database.metering_time_to_live self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter) self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp', self.db.resource) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() def record_metering_data(self, data): """Write the data to the backend storage system. 
:param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ # Record the updated resource metadata - we use $setOnInsert to # unconditionally insert sample timestamps and resource metadata # (in the update case, this must be conditional on the sample not # being out-of-order) data = copy.deepcopy(data) data['resource_metadata'] = pymongo_utils.improve_keys( data.pop('resource_metadata')) resource = self.db.resource.find_one_and_update( {'_id': data['resource_id']}, {'$set': {'project_id': data['project_id'], 'user_id': data['user_id'], 'source': data['source'], }, '$setOnInsert': {'metadata': data['resource_metadata'], 'first_sample_timestamp': data['timestamp'], 'last_sample_timestamp': data['timestamp'], }, '$addToSet': {'meter': {'counter_name': data['counter_name'], 'counter_type': data['counter_type'], 'counter_unit': data['counter_unit'], }, }, }, upsert=True, return_document=pymongo.ReturnDocument.AFTER, ) # only update last sample timestamp if actually later (the usual # in-order case) last_sample_timestamp = resource.get('last_sample_timestamp') if (last_sample_timestamp is None or last_sample_timestamp <= data['timestamp']): self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'metadata': data['resource_metadata'], 'last_sample_timestamp': data['timestamp']}} ) # only update first sample timestamp if actually earlier (the unusual # out-of-order case) # NOTE: a null first sample timestamp is not updated as this indicates # a pre-existing resource document dating from before we started # recording these timestamps in the resource collection first_sample_timestamp = resource.get('first_sample_timestamp') if (first_sample_timestamp is not None and first_sample_timestamp > data['timestamp']): self.db.resource.update_one( {'_id': data['resource_id']}, {'$set': {'first_sample_timestamp': data['timestamp']}} ) # Record the raw data for the meter. Use a copy so we do not # modify a data structure owned by our caller (the driver adds # a new key '_id'). record = copy.copy(data) record['recorded_at'] = timeutils.utcnow() self.db.meter.insert_one(record) def clear_expired_metering_data(self, ttl): """Clear expired data from the backend storage system. Clearing relies on MongoDB's native time-to-live feature. """ LOG.debug("Clearing expired metering data is handled by MongoDB's " "native time-to-live feature and runs in the background.") @classmethod def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): """Return sort instructions and a paging operator. Sort instructions are used in the query to determine what attributes to sort on and what direction to use. :param sort_keys: array of attributes by which results will be sorted. :param sort_dir: direction in which results will be sorted (asc, desc). :return: sort instructions and paging operator """ sort_keys = sort_keys or [] sort_instructions = [] _sort_dir, operation = cls.SORT_OPERATION_MAPPING.get( sort_dir, cls.SORT_OPERATION_MAPPING['desc']) for _sort_key in sort_keys: _instruction = (_sort_key, _sort_dir) sort_instructions.append(_instruction) return sort_instructions, operation def _get_time_constrained_resources(self, query, start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, metaquery, resource, limit): """Return an iterable of models.Resource instances Items are constrained by sample timestamp. :param query: project/user/source query :param start_timestamp: modified timestamp start range.
:param start_timestamp_op: start time operator, like gt, ge. :param end_timestamp: modified timestamp end range. :param end_timestamp_op: end time operator, like lt, le. :param metaquery: dict with metadata to match on. :param resource: resource filter. """ if resource is not None: query['resource_id'] = resource # Add resource_ prefix so it matches the field in the db query.update(dict(('resource_' + k, v) for (k, v) in six.iteritems(metaquery))) # FIXME(dhellmann): This may not perform very well, # but doing any better will require changing the database # schema and that will need more thought than I have time # to put into it today. # Look for resources matching the above criteria and with # samples in the time range we care about, then change the # resource query to return just those resources by id. ts_range = pymongo_utils.make_timestamp_range(start_timestamp, end_timestamp, start_timestamp_op, end_timestamp_op) if ts_range: query['timestamp'] = ts_range sort_keys = base._handle_sort_key('resource') sort_instructions = self._build_sort_instructions(sort_keys)[0] # use a unique collection name for the results collection, # as result post-sorting (as opposed to reduce pre-sorting) # is not possible on an inline M-R out = 'resource_list_%s' % uuid.uuid4() self.db.meter.map_reduce(self.MAP_RESOURCES, self.REDUCE_RESOURCES, out=out, sort={'resource_id': 1}, query=query) try: if limit is not None: results = self.db[out].find(sort=sort_instructions, limit=limit) else: results = self.db[out].find(sort=sort_instructions) for r in results: resource = r['value'] yield models.Resource( resource_id=r['_id'], user_id=resource['user_id'], project_id=resource['project_id'], first_sample_timestamp=resource['first_timestamp'], last_sample_timestamp=resource['last_timestamp'], source=resource['source'], metadata=pymongo_utils.unquote_keys(resource['metadata'])) finally: self.db[out].drop() def _get_floating_resources(self, query, metaquery, resource, limit): """Return an iterable of models.Resource instances Items are unconstrained by timestamp. :param query: project/user/source query :param metaquery: dict with metadata to match on. :param resource: resource filter. """ if resource is not None: query['_id'] = resource query.update(dict((k, v) for (k, v) in six.iteritems(metaquery))) keys = base._handle_sort_key('resource') sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i for i in keys] sort_instructions = self._build_sort_instructions(sort_keys)[0] if limit is not None: results = self.db.resource.find(query, sort=sort_instructions, limit=limit) else: results = self.db.resource.find(query, sort=sort_instructions) for r in results: yield models.Resource( resource_id=r['_id'], user_id=r['user_id'], project_id=r['project_id'], first_sample_timestamp=r.get('first_sample_timestamp', self._GENESIS), last_sample_timestamp=r.get('last_sample_timestamp', self._APOCALYPSE), source=r['source'], metadata=pymongo_utils.unquote_keys(r['metadata'])) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, metaquery=None, resource=None, limit=None): """Return an iterable of models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range. :param end_timestamp_op: Optional end time operator, like lt, le. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. :param limit: Maximum number of results to return. """ if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} query = {} if user is not None: query['user_id'] = user if project is not None: query['project_id'] = project if source is not None: query['source'] = source if start_timestamp or end_timestamp: return self._get_time_constrained_resources(query, start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, metaquery, resource, limit) else: return self._get_floating_resources(query, metaquery, resource, limit) @staticmethod def _make_period_dict(period, first_ts): """Create a period field for the _id of grouped fields. :param period: Period duration in seconds :param first_ts: First timestamp for first period :return: dict with the period_start expression used in the $group _id """ if period >= 0: period_unique_dict = { "period_start": { "$divide": [ {"$subtract": [ {"$subtract": ["$timestamp", first_ts]}, {"$mod": [{"$subtract": ["$timestamp", first_ts]}, period * 1000] } ]}, period * 1000 ] } } else: # Note(ityaptin) Hack for older MongoDB versions (2.4 and older). # Since 2.6+ we could use the $literal operator period_unique_dict = {"$period_start": {"$add": [0, 0]}} return period_unique_dict def get_meter_statistics(self, sample_filter, period=None, groupby=None, aggregate=None): """Return an iterable of models.Statistics instances. Items contain meter statistics described by the query parameters. The filter must have a meter value set. """ if (groupby and set(groupby) - set(['user_id', 'project_id', 'resource_id', 'source', 'resource_metadata.instance_type'])): raise ceilometer.NotImplementedError( "Unable to group by these fields") q = pymongo_utils.make_query_from_filter(sample_filter) group_stage = {} project_stage = { "unit": "$_id.unit", "name": "$_id.name", "first_timestamp": "$first_timestamp", "last_timestamp": "$last_timestamp", "period_start": "$_id.period_start", } # Add timestamps to $group stage group_stage.update({"first_timestamp": {"$min": "$timestamp"}, "last_timestamp": {"$max": "$timestamp"}}) # Define an _id field for grouped documents unique_group_field = {"name": "$counter_name", "unit": "$counter_unit"} # Define a first timestamp for periods if sample_filter.start_timestamp: first_timestamp = sample_filter.start_timestamp else: first_timestamp_cursor = self.db.meter.find( limit=1, sort=[('timestamp', pymongo.ASCENDING)]) if first_timestamp_cursor.count(): first_timestamp = first_timestamp_cursor[0]['timestamp'] else: first_timestamp = utils.EPOCH_TIME # Add a start_period field to the unique identifier of grouped # documents if period: period_dict = self._make_period_dict(period, first_timestamp) unique_group_field.update(period_dict) # Add groupby fields to the unique identifier of grouped documents if groupby: unique_group_field.update(dict((field.replace(".", "/"), "$%s" % field) for field in groupby)) group_stage.update({"_id": unique_group_field}) self._compile_aggregate_stages(aggregate, group_stage, project_stage) # Aggregation stages list. The stages run one by one, each consuming # the documents produced by the previous stage.
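# An illustrative sketch of the assembled pipeline (hypothetical values;
# the accumulator expressions contributed by the pymongo_utils aggregates
# are elided as "..."): for a query on meter 'cpu' grouped by period, the
# list built below resembles
#
#     [{'$match': {'counter_name': 'cpu'}},
#      {'$sort': {'timestamp': 1}},
#      {'$group': {'_id': {'name': '$counter_name',
#                          'unit': '$counter_unit',
#                          'period_start': {...}},
#                  'first_timestamp': {'$min': '$timestamp'},
#                  'last_timestamp': {'$max': '$timestamp'},
#                  ...}},
#      {'$sort': {'_id.period_start': 1}},
#      {'$project': {'unit': '$_id.unit',
#                    'name': '$_id.name',
#                    'first_timestamp': '$first_timestamp',
#                    'last_timestamp': '$last_timestamp',
#                    'period_start': '$_id.period_start',
#                    ...}}]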
aggregation_query = [{'$match': q}, {"$sort": {"timestamp": 1}}, {"$group": group_stage}, {"$sort": {"_id.period_start": 1}}, {"$project": project_stage}] # results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0 results = self.db.meter.aggregate(aggregation_query, **self._make_aggregation_params()) return [self._stats_result_to_model(point, groupby, aggregate, period, first_timestamp) for point in self._get_results(results)] def _stats_result_aggregates(self, result, aggregate): stats_args = {} for attr, func in Connection.STANDARD_AGGREGATES.items(): if attr in result: stats_args.update(func.finalize(result, version_array=self.version)) if aggregate: stats_args['aggregate'] = {} for agr in aggregate: stats_args['aggregate'].update( Connection.AGGREGATES[agr.func].finalize( result, agr.param, self.version)) return stats_args def _stats_result_to_model(self, result, groupby, aggregate, period, first_timestamp): if period is None: period = 0 first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp) stats_args = self._stats_result_aggregates(result, aggregate) stats_args['unit'] = result['unit'] stats_args['duration'] = (result["last_timestamp"] - result["first_timestamp"]).total_seconds() stats_args['duration_start'] = result['first_timestamp'] stats_args['duration_end'] = result['last_timestamp'] stats_args['period'] = period start = result.get("period_start", 0) * period stats_args['period_start'] = (first_timestamp + datetime.timedelta(seconds=start)) stats_args['period_end'] = (first_timestamp + datetime.timedelta(seconds=start + period) if period else result['last_timestamp']) stats_args['groupby'] = ( dict((g, result['_id'].get(g.replace(".", "/"))) for g in groupby) if groupby else None) return models.Statistics(**stats_args) def _compile_aggregate_stages(self, aggregate, group_stage, project_stage): if not aggregate: for aggregation in Connection.STANDARD_AGGREGATES.values(): group_stage.update( aggregation.group(version_array=self.version) ) project_stage.update( aggregation.project( version_array=self.version ) ) else: for description in aggregate: aggregation = Connection.AGGREGATES.get(description.func) if aggregation: if not aggregation.validate(description.param): raise storage.StorageBadAggregate( 'Bad aggregate: %s.%s' % (description.func, description.param)) group_stage.update( aggregation.group(description.param, version_array=self.version) ) project_stage.update( aggregation.project(description.param, version_array=self.version) ) @staticmethod def _get_results(results): if isinstance(results, dict): return results.get('result', []) else: return results def _make_aggregation_params(self): if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION: return {"allowDiskUse": True} return {} ceilometer-6.0.0/ceilometer/storage/models.py0000664000567000056710000001455512701406223022446 0ustar jenkinsjenkins00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the storage API. 
""" from ceilometer.storage import base class Resource(base.Model): """Something for which sample data has been collected.""" def __init__(self, resource_id, project_id, first_sample_timestamp, last_sample_timestamp, source, user_id, metadata): """Create a new resource. :param resource_id: UUID of the resource :param project_id: UUID of project owning the resource :param first_sample_timestamp: first sample timestamp captured :param last_sample_timestamp: last sample timestamp captured :param source: the identifier for the user/project id definition :param user_id: UUID of user owning the resource :param metadata: most current metadata for the resource (a dict) """ base.Model.__init__(self, resource_id=resource_id, first_sample_timestamp=first_sample_timestamp, last_sample_timestamp=last_sample_timestamp, project_id=project_id, source=source, user_id=user_id, metadata=metadata, ) class Meter(base.Model): """Definition of a meter for which sample data has been collected.""" def __init__(self, name, type, unit, resource_id, project_id, source, user_id): """Create a new meter. :param name: name of the meter :param type: type of the meter (gauge, delta, cumulative) :param unit: unit of the meter :param resource_id: UUID of the resource :param project_id: UUID of project owning the resource :param source: the identifier for the user/project id definition :param user_id: UUID of user owning the resource """ base.Model.__init__(self, name=name, type=type, unit=unit, resource_id=resource_id, project_id=project_id, source=source, user_id=user_id, ) class Sample(base.Model): """One collected data point.""" def __init__(self, source, counter_name, counter_type, counter_unit, counter_volume, user_id, project_id, resource_id, timestamp, resource_metadata, message_id, message_signature, recorded_at, ): """Create a new sample. :param source: the identifier for the user/project id definition :param counter_name: the name of the measurement being taken :param counter_type: the type of the measurement :param counter_unit: the units for the measurement :param counter_volume: the measured value :param user_id: the user that triggered the measurement :param project_id: the project that owns the resource :param resource_id: the thing on which the measurement was taken :param timestamp: the time of the measurement :param resource_metadata: extra details about the resource :param message_id: a message identifier :param recorded_at: sample record timestamp :param message_signature: a hash created from the rest of the message data """ base.Model.__init__(self, source=source, counter_name=counter_name, counter_type=counter_type, counter_unit=counter_unit, counter_volume=counter_volume, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=timestamp, resource_metadata=resource_metadata, message_id=message_id, message_signature=message_signature, recorded_at=recorded_at) class Statistics(base.Model): """Computed statistics based on a set of sample data.""" def __init__(self, unit, period, period_start, period_end, duration, duration_start, duration_end, groupby, **data): """Create a new statistics object. 
:param unit: The unit type of the data set :param period: The length of the time range covered by these stats :param period_start: The timestamp for the start of the period :param period_end: The timestamp for the end of the period :param duration: The total time for the matching samples :param duration_start: The earliest time for the matching samples :param duration_end: The latest time for the matching samples :param groupby: The fields used to group the samples. :param data: some or all of the following aggregates min: The smallest volume found max: The largest volume found avg: The average of all volumes found sum: The total of all volumes found count: The number of samples found aggregate: name-value pairs for selectable aggregates """ base.Model.__init__(self, unit=unit, period=period, period_start=period_start, period_end=period_end, duration=duration, duration_start=duration_start, duration_end=duration_end, groupby=groupby, **data) ceilometer-6.0.0/ceilometer/storage/pymongo_base.py0000664000567000056710000001531612701406223023641 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB and DB2 backends """ import pymongo from ceilometer.storage import base from ceilometer.storage import models from ceilometer.storage.mongo import utils as pymongo_utils from ceilometer import utils COMMON_AVAILABLE_CAPABILITIES = { 'meters': {'query': {'simple': True, 'metadata': True}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base Connection class for MongoDB and DB2 drivers.""" CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery=None, limit=None, unique=False): """Return an iterable of models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional resource filter. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. :param limit: Maximum number of results to return. :param unique: If set to true, return only unique meter information. 
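A minimal usage sketch (assuming ``conn`` is an instance of this
class; the project id is a made-up placeholder)::

    # list at most ten distinct meter definitions for one project
    for meter in conn.get_meters(project='some-project-id',
                                 limit=10, unique=True):
        print(meter.name)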
""" if limit == 0: return metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} q = {} if user == 'None': q['user_id'] = None elif user is not None: q['user_id'] = user if project == 'None': q['project_id'] = None elif project is not None: q['project_id'] = project if resource == 'None': q['_id'] = None elif resource is not None: q['_id'] = resource if source is not None: q['source'] = source q.update(metaquery) count = 0 if unique: meter_names = set() for r in self.db.resource.find(q): for r_meter in r['meter']: if unique: if r_meter['counter_name'] in meter_names: continue else: meter_names.add(r_meter['counter_name']) if limit and count >= limit: return else: count += 1 if unique: yield models.Meter( name=r_meter['counter_name'], type=r_meter['counter_type'], # Return empty string if 'counter_unit' is not valid # for backward compatibility. unit=r_meter.get('counter_unit', ''), resource_id=None, project_id=None, source=None, user_id=None) else: yield models.Meter( name=r_meter['counter_name'], type=r_meter['counter_type'], # Return empty string if 'counter_unit' is not valid # for backward compatibility. unit=r_meter.get('counter_unit', ''), resource_id=r['_id'], project_id=r['project_id'], source=r['source'], user_id=r['user_id']) def get_samples(self, sample_filter, limit=None): """Return an iterable of model.Sample instances. :param sample_filter: Filter. :param limit: Maximum number of results to return. """ if limit == 0: return [] q = pymongo_utils.make_query_from_filter(sample_filter, require_meter=False) return self._retrieve_samples(q, [("timestamp", pymongo.DESCENDING)], limit) def query_samples(self, filter_expr=None, orderby=None, limit=None): if limit == 0: return [] query_filter = {} orderby_filter = [("timestamp", pymongo.DESCENDING)] transformer = pymongo_utils.QueryTransformer() if orderby is not None: orderby_filter = transformer.transform_orderby(orderby) if filter_expr is not None: query_filter = transformer.transform_filter(filter_expr) return self._retrieve_samples(query_filter, orderby_filter, limit) def _retrieve_samples(self, query, orderby, limit): if limit is not None: samples = self.db.meter.find(query, limit=limit, sort=orderby) else: samples = self.db.meter.find(query, sort=orderby) for s in samples: # Remove the ObjectId generated by the database when # the sample was inserted. It is an implementation # detail that should not leak outside of the driver. del s['_id'] # Backward compatibility for samples without units s['counter_unit'] = s.get('counter_unit', '') # Compatibility with MongoDB 3.+ s['counter_volume'] = float(s.get('counter_volume')) # Tolerate absence of recorded_at in older datapoints s['recorded_at'] = s.get('recorded_at') # Check samples for metadata and "unquote" key if initially it # was started with '$'. if s.get('resource_metadata'): s['resource_metadata'] = pymongo_utils.unquote_keys( s.get('resource_metadata')) yield models.Sample(**s) ceilometer-6.0.0/ceilometer/storage/hbase/0000775000567000056710000000000012701406364021667 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/hbase/inmemory.py0000664000567000056710000002252212701406223024075 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is a very crude version of "in-memory HBase", which implements just enough functionality of the HappyBase API to support testing of our driver. """ import copy import re from oslo_log import log import six import ceilometer LOG = log.getLogger(__name__) class MTable(object): """HappyBase.Table mock.""" def __init__(self, name, families): self.name = name self.families = families self._rows_with_ts = {} def row(self, key, columns=None): if key not in self._rows_with_ts: return {} res = copy.copy(sorted(six.iteritems( self._rows_with_ts.get(key)))[-1][1]) if columns: keys = res.keys() for key in keys: if key not in columns: res.pop(key) return res def rows(self, keys): return ((k, self.row(k)) for k in keys) def put(self, key, data, ts=None): # Note: timestamped puts are currently used only for the Resource # table; that is why we may default to ts='0' when ts is None. If # two types of put were needed in one table, ts=0 could not be used. if ts is None: ts = "0" if key not in self._rows_with_ts: self._rows_with_ts[key] = {ts: data} else: if ts in self._rows_with_ts[key]: self._rows_with_ts[key][ts].update(data) else: self._rows_with_ts[key].update({ts: data}) def delete(self, key): del self._rows_with_ts[key] def _get_latest_dict(self, row): # The idea here is to return the latest versions of columns. # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}. # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})] # sorted by ts, i.e. in this list ts_2 is the latest. # To get the result as HBase provides it, we iterate over the # versions in timestamp order, so newer values overwrite older ones. data = {} for i in sorted(six.iteritems(self._rows_with_ts[row])): data.update(i[1]) return data def scan(self, filter=None, columns=None, row_start=None, row_stop=None, limit=None): columns = columns or [] sorted_keys = sorted(self._rows_with_ts) # copy data between row_start and row_stop into a dict rows = {} for row in sorted_keys: if row_start and row < row_start: continue if row_stop and row > row_stop: break rows[row] = self._get_latest_dict(row) if columns: ret = {} for row, data in six.iteritems(rows): for key in data: if key in columns: ret[row] = data rows = ret if filter: # TODO(jdanjou): we should really parse this properly, # but at the moment we are only going to support AND here filters = filter.split('AND') for f in filters: # Extract filter name and its arguments g = re.search("(.*)\((.*),?\)", f) fname = g.group(1).strip() fargs = [s.strip().replace('\'', '') for s in g.group(2).split(',')] m = getattr(self, fname) if callable(m): # overwrite rows for filtering to take effect # in case of multiple filters rows = m(fargs, rows) else: raise ceilometer.NotImplementedError( "%s filter is not implemented, " "you may want to add it!" % fname) for k in sorted(rows)[:limit]: yield k, rows[k] @staticmethod def SingleColumnValueFilter(args, rows): """This is a filter for testing "in-memory HBase". This method is called from scan() when 'SingleColumnValueFilter' is found in the 'filter' argument.
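For example, a scan filter string such as (values are illustrative)::

    SingleColumnValueFilter ('f', 'user_id', =, 'binary:"alice"')

is parsed by scan() into ``args == ['f', 'user_id', '=',
'binary:"alice"']``; this method then strips the ``binary:`` prefix
and keeps only the rows whose ``f:user_id`` column stores the
JSON-dumped value ``"alice"``.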
""" op = args[2] column = "%s:%s" % (args[0], args[1]) value = args[3] if value.startswith('binary:'): value = value[7:] r = {} for row in rows: data = rows[row] if op == '=': if column in data and data[column] == value: r[row] = data elif op == '<': if column in data and data[column] < value: r[row] = data elif op == '<=': if column in data and data[column] <= value: r[row] = data elif op == '>': if column in data and data[column] > value: r[row] = data elif op == '>=': if column in data and data[column] >= value: r[row] = data elif op == '!=': if column in data and data[column] != value: r[row] = data return r @staticmethod def ColumnPrefixFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'ColumnPrefixFilter' is found in the 'filter' argument. :param args: a list of filter arguments, contain prefix of column :param rows: a dict of row prefixes for filtering """ value = args[0] column = 'f:' + value r = {} for row, data in rows.items(): column_dict = {} for key in data: if key.startswith(column): column_dict[key] = data[key] r[row] = column_dict return r @staticmethod def RowFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'RowFilter' is found in the 'filter' argument. :param args: a list of filter arguments, it contains operator and sought string :param rows: a dict of rows which are filtered """ op = args[0] value = args[1] if value.startswith('regexstring:'): value = value[len('regexstring:'):] r = {} for row, data in rows.items(): try: g = re.search(value, row).group() if op == '=': if g == row: r[row] = data else: raise ceilometer.NotImplementedError( "In-memory " "RowFilter doesn't support " "the %s operation yet" % op) except AttributeError: pass return r @staticmethod def QualifierFilter(args, rows): """This is filter for testing "in-memory HBase". 
This method is called from scan() when 'QualifierFilter' is found in the 'filter' argument. """ op = args[0] value = args[1] is_regex = False if value.startswith('binaryprefix:'): value = value[len('binaryprefix:'):] if value.startswith('regexstring:'): value = value[len('regexstring:'):] is_regex = True column = 'f:' + value r = {} for row in rows: data = rows[row] r_data = {} for key in data: if ((op == '=' and key.startswith(column)) or (op == '>=' and key >= column) or (op == '<=' and key <= column) or (op == '>' and key > column) or (op == '<' and key < column) or (is_regex and re.search(value, key))): r_data[key] = data[key] else: raise ceilometer.NotImplementedError( "In-memory QualifierFilter " "doesn't support the %s " "operation yet" % op) if r_data: r[row] = r_data return r class MConnectionPool(object): def __init__(self): self.conn = MConnection() def connection(self): return self.conn class MConnection(object): """HappyBase.Connection mock.""" def __init__(self): self.tables = {} def __enter__(self, *args, **kwargs): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @staticmethod def open(): LOG.debug("Opening in-memory HBase connection") def create_table(self, n, families=None): families = families or {} if n in self.tables: return self.tables[n] t = MTable(n, families) self.tables[n] = t return t def delete_table(self, name, use_prefix=True): del self.tables[name] def table(self, name): return self.create_table(name) ceilometer-6.0.0/ceilometer/storage/hbase/utils.py0000664000567000056710000004342112701406224023400 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Various HBase helpers """ import copy import datetime import json import bson.json_util from happybase.hbase import ttypes from oslo_log import log import six from ceilometer.i18n import _ from ceilometer import utils LOG = log.getLogger(__name__) EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4} OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='} # We need this additional dictionary because we have reversed timestamps in # row-keys for stored metrics OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<', 'ge': '<='} def _QualifierFilter(op, qualifier): return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier) def timestamp(dt, reverse=True): """Timestamp is the count of microseconds since the start of the epoch. If reverse=True then the timestamp will be reversed. Such a technique is used in HBase rowkey design when period queries are required. Because rows are sorted lexicographically, this makes it possible to choose whether the 'oldest' entries or the newest ones appear at the top of the table (reversed timestamp case).
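A small sanity check derived from the formula below (doctest-style,
values computed by hand)::

    >>> timestamp(datetime.datetime(1970, 1, 1, 0, 0, 1), reverse=False)
    1000000
    >>> timestamp(datetime.datetime(1970, 1, 1, 0, 0, 1))
    9223372036853775807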
:param dt: datetime which is translated to a timestamp :param reverse: a boolean parameter for a reversed or straight count of the timestamp in microseconds :return: count or reversed count of microseconds since the start of the epoch """ epoch = datetime.datetime(1970, 1, 1) td = dt - epoch ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000 return 0x7fffffffffffffff - ts if reverse else ts def make_events_query_from_filter(event_filter): """Return start and stop row for filtering and a query. The query is based on the selected parameters. :param event_filter: storage.EventFilter object. """ start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False) if event_filter.start_timestamp else "") stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False) if event_filter.end_timestamp else "") kwargs = {'event_type': event_filter.event_type, 'event_id': event_filter.message_id} res_q = make_query(**kwargs) if event_filter.traits_filter: for trait_filter in event_filter.traits_filter: q_trait = make_query(trait_query=True, **trait_filter) if q_trait: if res_q: res_q += " AND " + q_trait else: res_q = q_trait return res_q, start, stop def make_timestamp_query(func, start=None, start_op=None, end=None, end_op=None, bounds_only=False, **kwargs): """Return a filter start and stop row for filtering and a query. The query is based on the fact that the CF-name is 'rts'. :param start: Optional start timestamp :param start_op: Optional start timestamp operator, like gt, ge :param end: Optional end timestamp :param end_op: Optional end timestamp operator, like lt, le :param bounds_only: if True then the query will not be returned :param func: a function that provides the row format :param kwargs: kwargs for func """ # We don't need to dump here because get_start_end_rts returns strings rts_start, rts_end = get_start_end_rts(start, end) start_row, end_row = func(rts_start, rts_end, **kwargs) if bounds_only: return start_row, end_row q = [] start_op = start_op or 'ge' end_op = end_op or 'lt' if rts_start: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[start_op], rts_start)) if rts_end: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[end_op], rts_end)) res_q = None if len(q): res_q = " AND ".join(q) return start_row, end_row, res_q def get_start_end_rts(start, end): rts_start = str(timestamp(start)) if start else "" rts_end = str(timestamp(end)) if end else "" return rts_start, rts_end def make_query(metaquery=None, trait_query=None, **kwargs): """Return a filter query string based on the selected parameters. :param metaquery: optional metaquery dict :param trait_query: optional boolean, for trait_query from kwargs :param kwargs: key-value pairs to filter on. Key should be a real column name in db """ q = [] res_q = None # Query for traits differs from others. It is constructed with # SingleColumnValueFilter with the possibility to choose a comparison # operator if trait_query: trait_name = kwargs.pop('key') op = kwargs.pop('op', 'eq') for k, v in kwargs.items(): if v is not None: res_q = ("SingleColumnValueFilter " "('f', '%s', %s, 'binary:%s', true, true)" % (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]), OP_SIGN[op], dump(v))) return res_q # Note: we use the extended constructor for SingleColumnValueFilter here. # It explicitly specifies that an entry should not be returned if the CF # is not found in the table.
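# As an illustration (the argument values are hypothetical),
# make_query(user_id='alice', source='openstack') produces the single
# string (shown wrapped here):
#
#     SingleColumnValueFilter ('f', 's_openstack', =, 'binary:"1"',
#                              true, true)
#     AND SingleColumnValueFilter ('f', 'user_id', =, 'binary:"alice"',
#                                  true, true)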
for key, value in sorted(kwargs.items()): if value is not None: if key == 'source': q.append("SingleColumnValueFilter " "('f', 's_%s', =, 'binary:%s', true, true)" % (value, dump('1'))) elif key == 'trait_type': q.append("ColumnPrefixFilter('%s')" % value) elif key == 'event_id': q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value) else: q.append("SingleColumnValueFilter " "('f', '%s', =, 'binary:%s', true, true)" % (quote(key), dump(value))) res_q = None if len(q): res_q = " AND ".join(q) if metaquery: meta_q = [] for k, v in metaquery.items(): meta_q.append( "SingleColumnValueFilter ('f', '%s', =, 'binary:%s', " "true, true)" % ('r_' + k, dump(v))) meta_q = " AND ".join(meta_q) # join query and metaquery if res_q is not None: res_q += " AND " + meta_q else: res_q = meta_q # metaquery only return res_q def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs): """Return a list of required columns in meter table to be scanned. A scan's 'columns' argument should be used to determine which columns we are interested in. But if we want to use 'filter' and 'columns' together, we have to include the columns we are filtering by in the columns list. Please see an example: If we make a scan with the filter "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')" and columns ['f:rts'], the output will always be empty, because only 'rts' will be returned and the filter will be applied to this data, so 's_test-1' cannot be found. To make this request correct it should be fixed as follows: filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')", columns = ['f:rts','f:s_test-1'] :param metaquery: optional metaquery dict :param need_timestamp: flag, which defines the need for timestamp columns :param kwargs: key-value pairs to filter on. Key should be a real column name in db """ columns = ['f:message', 'f:recorded_at'] columns.extend("f:%s" % k for k, v in kwargs.items() if v is not None) if metaquery: columns.extend("f:r_%s" % k for k, v in metaquery.items() if v is not None) source = kwargs.get('source') if source: columns.append("f:s_%s" % source) if need_timestamp: columns.extend(['f:rts', 'f:timestamp']) return columns def make_sample_query_from_filter(sample_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param sample_filter: SampleFilter instance :param require_meter: If True and the filter does not have a meter, raise an error.
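A rough sketch of the result shape (shapes only; the actual values
depend on the filter): the function returns a 4-tuple ``(res_q,
start_row, end_row, columns)`` where ``res_q`` is the combined filter
string (or None), ``start_row``/``end_row`` bound the rowkey scan
built by make_general_rowkey_scan, and ``columns`` is the list
produced by get_meter_columns, including 'f:rts' and 'f:timestamp'
whenever a timestamp range is requested.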
""" meter = sample_filter.meter if not meter and require_meter: raise RuntimeError('Missing required meter specifier') start_row, end_row, ts_query = make_timestamp_query( make_general_rowkey_scan, start=sample_filter.start_timestamp, start_op=sample_filter.start_timestamp_op, end=sample_filter.end_timestamp, end_op=sample_filter.end_timestamp_op, some_id=meter) kwargs = dict(user_id=sample_filter.user, project_id=sample_filter.project, counter_name=meter, resource_id=sample_filter.resource, source=sample_filter.source, message_id=sample_filter.message_id) q = make_query(metaquery=sample_filter.metaquery, **kwargs) if q: res_q = q + " AND " + ts_query if ts_query else q else: res_q = ts_query if ts_query else None need_timestamp = (sample_filter.start_timestamp or sample_filter.end_timestamp) is not None columns = get_meter_columns(metaquery=sample_filter.metaquery, need_timestamp=need_timestamp, **kwargs) return res_q, start_row, end_row, columns def make_meter_query_for_resource(start_timestamp, start_timestamp_op, end_timestamp, end_timestamp_op, source, query=None): """This method is used when Resource table should be filtered by meters. In this method we are looking into all qualifiers with m_ prefix. :param start_timestamp: meter's timestamp start range. :param start_timestamp_op: meter's start time operator, like ge, gt. :param end_timestamp: meter's timestamp end range. :param end_timestamp_op: meter's end time operator, like lt, le. :param source: source filter. :param query: a query string to concatenate with. """ start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp) mq = [] start_op = start_timestamp_op or 'ge' end_op = end_timestamp_op or 'lt' if start_rts: filter_value = (start_rts + ':' + quote(source) if source else start_rts) mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value)) if end_rts: filter_value = (end_rts + ':' + quote(source) if source else end_rts) mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value)) if mq: meter_q = " AND ".join(mq) # If there is a filtering on time_range we need to point that # qualifiers should start with m_. Overwise in case e.g. # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808') # qualifier 's_test' satisfies the filter and will be returned. meter_q = _QualifierFilter("=", '') + " AND " + meter_q query = meter_q if not query else query + " AND " + meter_q return query def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): """If it's filter on some_id without start and end. start_row = some_id while end_row = some_id + MAX_BYTE. """ if some_id is None: return None, None if not rts_start: # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123 # will be quoted and character will be turn in a composition that is # started with '%' (chr(37)) that lexicographically is less then chr # of number rts_start = chr(122) end_row = prepare_key(some_id, rts_start) start_row = prepare_key(some_id, rts_end) return start_row, end_row def prepare_key(*args): """Prepares names for rows and columns with correct separator. 
:param args: strings or numbers from which we want to construct the key :return: key with quoted args that are separated with the character ":" """ key_quote = [] for key in args: if isinstance(key, six.integer_types): key = str(key) key_quote.append(quote(key)) return ":".join(key_quote) def timestamp_from_record_tuple(record): """Extract timestamp from HBase tuple record.""" return record[0]['timestamp'] def resource_id_from_record_tuple(record): """Extract resource_id from HBase tuple record.""" return record[0]['resource_id'] def deserialize_entry(entry, get_raw_meta=True): """Return a tuple of flatten_result, meters and metadata. flatten_result contains a dict of simple structures such as 'resource_id': 1. meters is the list of the entry's meters. metadata is a metadata dict; this dict may be returned flattened if get_raw_meta is False. :param entry: entry from HBase, without row name and timestamp :param get_raw_meta: If True then raw metadata will be returned; if False metadata will be constructed from 'f:r_metadata.' fields """ flatten_result = {} sources = [] meters = [] metadata_flattened = {} for k, v in entry.items(): if k.startswith('f:s_'): sources.append(decode_unicode(k[4:])) elif k.startswith('f:r_metadata.'): qualifier = decode_unicode(k[len('f:r_metadata.'):]) metadata_flattened[qualifier] = load(v) elif k.startswith("f:m_"): meter = ([unquote(i) for i in k[4:].split(':')], load(v)) meters.append(meter) else: if ':' in k[2:]: key = tuple([unquote(i) for i in k[2:].split(':')]) else: key = unquote(k[2:]) flatten_result[key] = load(v) if get_raw_meta: metadata = flatten_result.get('resource_metadata', {}) else: metadata = metadata_flattened return flatten_result, meters, metadata def serialize_entry(data=None, **kwargs): """Return a dict that is ready to be stored to HBase. :param data: dict to be serialized :param kwargs: additional args """ data = data or {} entry_dict = copy.copy(data) entry_dict.update(**kwargs) result = {} for k, v in entry_dict.items(): if k == 'source': # user, project and resource tables may contain several sources. # Besides, the resource table may contain several meters. # To make insertion safe we need to store all meters and sources in # a separate cell. For this purpose the s_ and m_ prefixes are # introduced. qualifier = encode_unicode('f:s_%s' % v) result[qualifier] = dump('1') elif k == 'meter': for meter, ts in v.items(): qualifier = encode_unicode('f:m_%s' % meter) result[qualifier] = dump(ts) elif k == 'resource_metadata': # Keep raw metadata as well as flattened to provide # compatibility with API v2. It will be flattened in another # way on the API level. But we need the flattened form too for # quick filtering. flattened_meta = dump_metadata(v) for key, m in flattened_meta.items(): metadata_qualifier = encode_unicode('f:r_metadata.'
+ key) result[metadata_qualifier] = dump(m) result['f:resource_metadata'] = dump(v) else: result['f:' + quote(k, ':')] = dump(v) return result def dump_metadata(meta): resource_metadata = {} for key, v in utils.dict_to_keyval(meta): resource_metadata[key] = v return resource_metadata def dump(data): return json.dumps(data, default=bson.json_util.default) def load(data): return json.loads(data, object_hook=object_hook) def encode_unicode(data): return data.encode('utf-8') if isinstance(data, six.text_type) else data def decode_unicode(data): return data.decode('utf-8') if isinstance(data, six.string_types) else data # We don't want tzinfo in decoded JSON. This object_hook overrides # json_util.object_hook for $date def object_hook(dct): if "$date" in dct: dt = bson.json_util.object_hook(dct) return dt.replace(tzinfo=None) return bson.json_util.object_hook(dct) def create_tables(conn, tables, column_families): for table in tables: try: conn.create_table(table, column_families) except ttypes.AlreadyExists: if conn.table_prefix: table = ("%(table_prefix)s" "%(separator)s" "%(table_name)s" % dict(table_prefix=conn.table_prefix, separator=conn.table_prefix_separator, table_name=table)) LOG.warning(_("Cannot create table %(table_name)s, " "it already exists. Ignoring error") % {'table_name': table}) def quote(s, *args): """Return a quoted string even if it is a unicode one. :param s: string that should be quoted :param args: any symbols we want to stay unquoted """ s_en = s.encode('utf8') return six.moves.urllib.parse.quote(s_en, *args) def unquote(s): """Return an unquoted and decoded string. :param s: string that should be unquoted """ s_de = six.moves.urllib.parse.unquote(s) return s_de.decode('utf8') ceilometer-6.0.0/ceilometer/storage/hbase/__init__.py0000664000567000056710000000000012701406223023760 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/storage/hbase/base.py0000664000567000056710000000630112701406223023145 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import happybase from oslo_log import log from oslo_utils import netutils from six.moves.urllib import parse as urlparse from ceilometer.storage.hbase import inmemory as hbase_inmemory LOG = log.getLogger(__name__) class Connection(object): """Base connection class for HBase.""" _memory_instance = None def __init__(self, url): """HBase Connection Initialization.""" opts = self._parse_connection_url(url) if opts['host'] == '__test__': url = os.environ.get('CEILOMETER_TEST_HBASE_URL') if url: # Reparse URL, but from the env variable now opts = self._parse_connection_url(url) self.conn_pool = self._get_connection_pool(opts) else: # This is an in-memory backend for unit tests if Connection._memory_instance is None: LOG.debug('Creating a new in-memory HBase ' 'Connection object') Connection._memory_instance = (hbase_inmemory.
MConnectionPool()) self.conn_pool = Connection._memory_instance else: self.conn_pool = self._get_connection_pool(opts) @staticmethod def _get_connection_pool(conf): """Return a connection pool to the database. .. note:: The tests use a subclass to override this and return an in-memory connection pool. """ LOG.debug('connecting to HBase on %(host)s:%(port)s', {'host': conf['host'], 'port': conf['port']}) return happybase.ConnectionPool( size=100, host=conf['host'], port=conf['port'], table_prefix=conf['table_prefix'], table_prefix_separator=conf['table_prefix_separator']) @staticmethod def _parse_connection_url(url): """Parse connection parameters from a database url. .. note:: HBase Thrift does not support authentication and there is no database name, so we are not looking for these in the url. """ opts = {} result = netutils.urlsplit(url) opts['table_prefix'] = urlparse.parse_qs( result.query).get('table_prefix', [None])[0] opts['table_prefix_separator'] = urlparse.parse_qs( result.query).get('table_prefix_separator', ['_'])[0] opts['dbtype'] = result.scheme if ':' in result.netloc: opts['host'], port = result.netloc.split(':') else: opts['host'] = result.netloc port = 9090 opts['port'] = port and int(port) or 9090 return opts ceilometer-6.0.0/ceilometer/storage/hbase/migration.py0000664000567000056710000000734412701406223024234 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HBase storage backend migrations """ import re from ceilometer.storage.hbase import utils as hbase_utils def migrate_resource_table(conn, table): """Migrate table 'resource' in HBase. Change qualifiers format from "%s+%s+%s!%s!%s" % (rts, source, counter_name, counter_type,counter_unit) in columns with meters f:m_* to new separator format "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type,counter_unit) """ resource_table = conn.table(table) resource_filter = ("QualifierFilter(=, " "'regexstring:m_\\d{19}\\+" "[\\w-\\._]*\\+[\\w-\\._!]')") gen = resource_table.scan(filter=resource_filter) for row, data in gen: columns = [] updated_columns = dict() column_prefix = "f:" for column, value in data.items(): if column.startswith('f:m_'): columns.append(column) parts = column[2:].split("+", 2) parts.extend(parts.pop(2).split("!")) column = hbase_utils.prepare_key(*parts) updated_columns[column_prefix + column] = value resource_table.put(row, updated_columns) resource_table.delete(row, columns) def migrate_meter_table(conn, table): """Migrate table 'meter' in HBase. Change row format from "%s_%d_%s" % (counter_name, rts, message_signature) to new separator format "%s:%s:%s" % (counter_name, rts, message_signature) """ meter_table = conn.table(table) meter_filter = ("RowFilter(=, " "'regexstring:[\\w\\._-]*_\\d{19}_\\w*')") gen = meter_table.scan(filter=meter_filter) for row, data in gen: parts = row.rsplit('_', 2) new_row = hbase_utils.prepare_key(*parts) meter_table.put(new_row, data) meter_table.delete(row) def migrate_event_table(conn, table): """Migrate table 'event' in HBase. 
Change row format from "%d_%s" % (timestamp, event_id) to the new separator format "%s:%s" % (timestamp, event_id) Also change trait columns from "%s+%s" % (trait.name, trait.dtype) to "%s:%s" % (trait.name, trait.dtype) """ event_table = conn.table(table) event_filter = "RowFilter(=, 'regexstring:\\d*_\\w*')" gen = event_table.scan(filter=event_filter) trait_pattern = re.compile("f:[\w\-_]*\+\w") column_prefix = "f:" for row, data in gen: row_parts = row.split("_", 1) update_data = {} for column, value in data.items(): if trait_pattern.match(column): trait_parts = column[2:].rsplit('+', 1) column = hbase_utils.prepare_key(*trait_parts) update_data[column_prefix + column] = value new_row = hbase_utils.prepare_key(*row_parts) event_table.put(new_row, update_data) event_table.delete(row) TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table, 'meter': migrate_meter_table, 'event': migrate_event_table} def migrate_tables(conn, tables): if type(tables) is not list: tables = [tables] for table in tables: if table in TABLE_MIGRATION_FUNCS: TABLE_MIGRATION_FUNCS.get(table)(conn, table) ceilometer-6.0.0/ceilometer/notification.py0000664000567000056710000003336612701406224022203 0ustar jenkinsjenkins00000000000000# # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import threading from oslo_config import cfg from oslo_context import context from oslo_log import log import oslo_messaging from stevedore import extension from ceilometer.agent import plugin_base as base from ceilometer import coordination from ceilometer.event import endpoint as event_endpoint from ceilometer import exchange_control from ceilometer.i18n import _, _LI, _LW from ceilometer import messaging from ceilometer import pipeline from ceilometer import service_base from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('pipeline_processing_queues', default=10, min=1, help='Number of queues to parallelize workload across. This ' 'value should be larger than the number of active ' 'notification agents for optimal results.'), cfg.BoolOpt('ack_on_event_error', default=True, deprecated_group='collector', help='Acknowledge message when event persistence fails.'), cfg.BoolOpt('store_events', deprecated_group='collector', default=False, help='Save event details.'), cfg.BoolOpt('disable_non_metric_meters', default=True, help='WARNING: Ceilometer historically offered the ability to ' 'store events as meters. This usage is NOT advised as it ' 'can flood the metering database and cause performance ' 'degradation.'), cfg.BoolOpt('workload_partitioning', default=False, help='Enable workload partitioning, allowing multiple ' 'notification agents to be run simultaneously.'), cfg.MultiStrOpt('messaging_urls', default=[], secret=True, help="Messaging URLs to listen for notifications.
" "Example: transport://user:pass@host1:port" "[,hostN:portN]/virtual_host " "(DEFAULT/transport_url is used if empty)"), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'publishing them'), cfg.IntOpt('batch_timeout', default=None, help='Number of seconds to wait before publishing samples' 'when batch_size is not reached (None means indefinitely)'), ] cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS) cfg.CONF.register_opts(OPTS, group="notification") cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') class NotificationService(service_base.BaseService): """Notification service. When running multiple agents, additional queuing sequence is required for inter process communication. Each agent has two listeners: one to listen to the main OpenStack queue and another listener(and notifier) for IPC to divide pipeline sink endpoints. Coordination should be enabled to have proper active/active HA. """ NOTIFICATION_NAMESPACE = 'ceilometer.notification' NOTIFICATION_IPC = 'ceilometer-pipe' @classmethod def _get_notifications_manager(cls, pm): return extension.ExtensionManager( namespace=cls.NOTIFICATION_NAMESPACE, invoke_on_load=True, invoke_args=(pm, ) ) def _get_notifiers(self, transport, pipe): notifiers = [] for x in range(cfg.CONF.notification.pipeline_processing_queues): notifiers.append(oslo_messaging.Notifier( transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id='ceilometer.notification', topic='%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x))) return notifiers def _get_pipe_manager(self, transport, pipeline_manager): if cfg.CONF.notification.workload_partitioning: pipe_manager = pipeline.SamplePipelineTransportManager() for pipe in pipeline_manager.pipelines: key = pipeline.get_pipeline_grouping_key(pipe) pipe_manager.add_transporter( (pipe.source.support_meter, key or ['resource_id'], self._get_notifiers(transport, pipe))) else: pipe_manager = pipeline_manager return pipe_manager def _get_event_pipeline_manager(self, transport): if cfg.CONF.notification.store_events: if cfg.CONF.notification.workload_partitioning: event_pipe_manager = pipeline.EventPipelineTransportManager() for pipe in self.event_pipeline_manager.pipelines: event_pipe_manager.add_transporter( (pipe.source.support_event, ['event_type'], self._get_notifiers(transport, pipe))) else: event_pipe_manager = self.event_pipeline_manager return event_pipe_manager def start(self): super(NotificationService, self).start() self.partition_coordinator = None self.coord_lock = threading.Lock() self.listeners, self.pipeline_listeners = [], [] self.pipeline_manager = pipeline.setup_pipeline() if cfg.CONF.notification.store_events: self.event_pipeline_manager = pipeline.setup_event_pipeline() self.transport = messaging.get_transport() if cfg.CONF.notification.workload_partitioning: self.ctxt = context.get_admin_context() self.group_id = self.NOTIFICATION_NAMESPACE self.partition_coordinator = coordination.PartitionCoordinator() self.partition_coordinator.start() else: # FIXME(sileht): endpoint uses the notification_topics option # and it should not because this is an oslo_messaging option # not a ceilometer. Until we have something to get the # notification_topics in another way, we must create a transport # to ensure the option has been registered by oslo_messaging. 
messaging.get_notifier(self.transport, '') self.group_id = None self.pipe_manager = self._get_pipe_manager(self.transport, self.pipeline_manager) self.event_pipe_manager = self._get_event_pipeline_manager( self.transport) self.listeners, self.pipeline_listeners = [], [] self._configure_main_queue_listeners(self.pipe_manager, self.event_pipe_manager) if cfg.CONF.notification.workload_partitioning: # join group after all manager set up is configured self.partition_coordinator.join_group(self.group_id) self.partition_coordinator.watch_group(self.group_id, self._refresh_agent) self.tg.add_timer(cfg.CONF.coordination.heartbeat, self.partition_coordinator.heartbeat) self.tg.add_timer(cfg.CONF.coordination.check_watchers, self.partition_coordinator.run_watchers) # configure pipelines after all coordination is configured. self._configure_pipeline_listeners() if not cfg.CONF.notification.disable_non_metric_meters: LOG.warning(_LW('Non-metric meters may be collected. It is highly ' 'advisable to disable these meters using ' 'ceilometer.conf or the pipeline.yaml')) # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) self.init_pipeline_refresh() def _configure_main_queue_listeners(self, pipe_manager, event_pipe_manager): notification_manager = self._get_notifications_manager(pipe_manager) if not list(notification_manager): LOG.warning(_('Failed to load any notification handlers for %s'), self.NOTIFICATION_NAMESPACE) ack_on_error = cfg.CONF.notification.ack_on_event_error endpoints = [] if cfg.CONF.notification.store_events: endpoints.append( event_endpoint.EventsNotificationEndpoint(event_pipe_manager)) targets = [] for ext in notification_manager: handler = ext.obj if (cfg.CONF.notification.disable_non_metric_meters and isinstance(handler, base.NonMetricNotificationBase)): continue LOG.debug('Event types from %(name)s: %(type)s' ' (ack_on_error=%(error)s)', {'name': ext.name, 'type': ', '.join(handler.event_types), 'error': ack_on_error}) # NOTE(gordc): this could be a set check but oslo_messaging issue # https://bugs.launchpad.net/oslo.messaging/+bug/1398511 # This ensures we don't create multiple duplicate consumers. 
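The de-duplication step that follows can be shown in isolation. Target below is a minimal stand-in for oslo_messaging.Target; the point is that membership is tested linearly against a list, relying on equality rather than hashing:

class Target(object):
    # minimal stand-in for oslo_messaging.Target
    def __init__(self, topic, exchange=None):
        self.topic = topic
        self.exchange = exchange

    def __eq__(self, other):
        return (self.topic, self.exchange) == (other.topic, other.exchange)

targets = []
for new_tar in [Target('notifications', 'nova'),
                Target('notifications', 'nova'),    # duplicate, skipped
                Target('notifications', 'glance')]:
    if new_tar not in targets:
        targets.append(new_tar)

print(len(targets))  # 2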
for new_tar in handler.get_targets(cfg.CONF): if new_tar not in targets: targets.append(new_tar) endpoints.append(handler) urls = cfg.CONF.notification.messaging_urls or [None] for url in urls: transport = messaging.get_transport(url) listener = messaging.get_batch_notification_listener( transport, targets, endpoints, batch_size=cfg.CONF.notification.batch_size, batch_timeout=cfg.CONF.notification.batch_timeout) listener.start() self.listeners.append(listener) def _refresh_agent(self, event): self._configure_pipeline_listeners(True) def _configure_pipeline_listeners(self, reuse_listeners=False): with self.coord_lock: ev_pipes = [] if cfg.CONF.notification.store_events: ev_pipes = self.event_pipeline_manager.pipelines pipelines = self.pipeline_manager.pipelines + ev_pipes transport = messaging.get_transport() partitioned = self.partition_coordinator.extract_my_subset( self.group_id, range(cfg.CONF.notification.pipeline_processing_queues)) queue_set = {} for pipe_set, pipe in itertools.product(partitioned, pipelines): queue_set['%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, pipe_set)] = pipe if reuse_listeners: topics = queue_set.keys() kill_list = [] for listener in self.pipeline_listeners: if listener.dispatcher.targets[0].topic in topics: queue_set.pop(listener.dispatcher.targets[0].topic) else: kill_list.append(listener) for listener in kill_list: utils.kill_listeners([listener]) self.pipeline_listeners.remove(listener) else: utils.kill_listeners(self.pipeline_listeners) self.pipeline_listeners = [] for topic, pipe in queue_set.items(): LOG.debug('Pipeline endpoint: %s from set: %s', pipe.name, pipe_set) pipe_endpoint = (pipeline.EventPipelineEndpoint if isinstance(pipe, pipeline.EventPipeline) else pipeline.SamplePipelineEndpoint) listener = messaging.get_batch_notification_listener( transport, [oslo_messaging.Target(topic=topic)], [pipe_endpoint(self.ctxt, pipe)], batch_size=cfg.CONF.notification.batch_size, batch_timeout=cfg.CONF.notification.batch_timeout) listener.start() self.pipeline_listeners.append(listener) def stop(self): if getattr(self, 'partition_coordinator', None): self.partition_coordinator.stop() listeners = [] if getattr(self, 'listeners', None): listeners.extend(self.listeners) if getattr(self, 'pipeline_listeners', None): listeners.extend(self.pipeline_listeners) utils.kill_listeners(listeners) super(NotificationService, self).stop() def reload_pipeline(self): LOG.info(_LI("Reloading notification agent and listeners.")) if self.pipeline_validated: self.pipe_manager = self._get_pipe_manager( self.transport, self.pipeline_manager) if self.event_pipeline_validated: self.event_pipe_manager = self._get_event_pipeline_manager( self.transport) # re-start the main queue listeners. utils.kill_listeners(self.listeners) self._configure_main_queue_listeners( self.pipe_manager, self.event_pipe_manager) # re-start the pipeline listeners if workload partitioning # is enabled. if cfg.CONF.notification.workload_partitioning: self._configure_pipeline_listeners() ceilometer-6.0.0/ceilometer/service_base.py0000664000567000056710000001364112701406223022144 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Hewlett Packard # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
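The queue/topic assignment built by _configure_pipeline_listeners above reduces to a product of this agent's queue indices with the configured pipelines; a standalone sketch with illustrative pipeline names:

import itertools

NOTIFICATION_IPC = 'ceilometer-pipe'
partitioned = [0, 2]                        # from extract_my_subset(...)
pipelines = ['meter_sink', 'event_sink']    # pipe.name values, illustrative

queue_set = {}
for pipe_set, pipe in itertools.product(partitioned, pipelines):
    queue_set['%s-%s-%s' % (NOTIFICATION_IPC, pipe, pipe_set)] = pipe

print(sorted(queue_set))
# ['ceilometer-pipe-event_sink-0', 'ceilometer-pipe-event_sink-2',
#  'ceilometer-pipe-meter_sink-0', 'ceilometer-pipe-meter_sink-2']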
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log from oslo_service import service as os_service import six from ceilometer.i18n import _LE, _LI from ceilometer import pipeline LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseService(os_service.Service): def clear_pipeline_validation_status(self): """Clears pipeline validation status flags.""" self.pipeline_validated = False self.event_pipeline_validated = False def init_pipeline_refresh(self): """Initializes pipeline refresh state.""" self.clear_pipeline_validation_status() if cfg.CONF.refresh_pipeline_cfg: self.set_pipeline_mtime(pipeline.get_pipeline_mtime()) self.set_pipeline_hash(pipeline.get_pipeline_hash()) if cfg.CONF.refresh_event_pipeline_cfg: self.set_pipeline_mtime(pipeline.get_pipeline_mtime( pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) self.set_pipeline_hash(pipeline.get_pipeline_hash( pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) if (cfg.CONF.refresh_pipeline_cfg or cfg.CONF.refresh_event_pipeline_cfg): self.tg.add_timer(cfg.CONF.pipeline_polling_interval, self.refresh_pipeline) def get_pipeline_mtime(self, p_type=pipeline.SAMPLE_TYPE): return (self.event_pipeline_mtime if p_type == pipeline.EVENT_TYPE else self.pipeline_mtime) def set_pipeline_mtime(self, mtime, p_type=pipeline.SAMPLE_TYPE): if p_type == pipeline.EVENT_TYPE: self.event_pipeline_mtime = mtime else: self.pipeline_mtime = mtime def get_pipeline_hash(self, p_type=pipeline.SAMPLE_TYPE): return (self.event_pipeline_hash if p_type == pipeline.EVENT_TYPE else self.pipeline_hash) def set_pipeline_hash(self, _hash, p_type=pipeline.SAMPLE_TYPE): if p_type == pipeline.EVENT_TYPE: self.event_pipeline_hash = _hash else: self.pipeline_hash = _hash @abc.abstractmethod def reload_pipeline(self): """Reload pipeline in the agents.""" def pipeline_changed(self, p_type=pipeline.SAMPLE_TYPE): """Returns hash of changed pipeline else False.""" pipeline_mtime = self.get_pipeline_mtime(p_type) mtime = pipeline.get_pipeline_mtime(p_type) if mtime > pipeline_mtime: LOG.info(_LI('Pipeline configuration file has been updated.')) self.set_pipeline_mtime(mtime, p_type) _hash = pipeline.get_pipeline_hash(p_type) pipeline_hash = self.get_pipeline_hash(p_type) if _hash != pipeline_hash: LOG.info(_LI("Detected change in pipeline configuration.")) return _hash return False def refresh_pipeline(self): """Refreshes appropriate pipeline, then delegates to agent.""" if cfg.CONF.refresh_pipeline_cfg: pipeline_hash = self.pipeline_changed() if pipeline_hash: try: # Pipeline in the notification agent. if hasattr(self, 'pipeline_manager'): self.pipeline_manager = pipeline.setup_pipeline() # Polling in the polling agent. elif hasattr(self, 'polling_manager'): self.polling_manager = pipeline.setup_polling() LOG.debug("Pipeline has been refreshed. 
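pipeline_changed() above combines a cheap mtime comparison with a content hash so that touch-only rewrites are ignored. A rough standalone approximation (the md5 choice and file handling are assumptions; the real helpers live in ceilometer.pipeline):

import hashlib
import os

def get_pipeline_hash(path):
    # content hash; assumed md5 here for illustration
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

def pipeline_changed(path, last_mtime, last_hash):
    """Return the new hash if the file really changed, else False."""
    mtime = os.path.getmtime(path)
    if mtime <= last_mtime:
        return False
    new_hash = get_pipeline_hash(path)
    return new_hash if new_hash != last_hash else False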
" "old hash: %(old)s, new hash: %(new)s", {'old': self.pipeline_hash, 'new': pipeline_hash}) self.set_pipeline_hash(pipeline_hash) self.pipeline_validated = True except Exception as err: LOG.debug("Active pipeline config's hash is %s", self.pipeline_hash) LOG.exception(_LE('Unable to load changed pipeline: %s') % err) if cfg.CONF.refresh_event_pipeline_cfg: ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE) if ev_pipeline_hash: try: # Pipeline in the notification agent. if hasattr(self, 'event_pipeline_manager'): self.event_pipeline_manager = (pipeline. setup_event_pipeline()) LOG.debug("Event Pipeline has been refreshed. " "old hash: %(old)s, new hash: %(new)s", {'old': self.event_pipeline_hash, 'new': ev_pipeline_hash}) self.set_pipeline_hash(ev_pipeline_hash, pipeline.EVENT_TYPE) self.event_pipeline_validated = True except Exception as err: LOG.debug("Active event pipeline config's hash is %s", self.event_pipeline_hash) LOG.exception(_LE('Unable to load changed event pipeline:' ' %s') % err) if self.pipeline_validated or self.event_pipeline_validated: self.reload_pipeline() self.clear_pipeline_validation_status() ceilometer-6.0.0/ceilometer/version.py0000664000567000056710000000121112701406223021165 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('ceilometer') ceilometer-6.0.0/ceilometer/api/0000775000567000056710000000000012701406364017712 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/api/__init__.py0000664000567000056710000000234012701406223022014 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg # Register options for the service OPTS = [ cfg.PortOpt('port', default=8777, deprecated_name='metering_api_port', deprecated_group='DEFAULT', help='The port for the ceilometer API server.', ), cfg.StrOpt('host', default='0.0.0.0', help='The listen IP for the ceilometer API server.', ), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='api', title='Options for the ceilometer-api service') CONF.register_group(opt_group) CONF.register_opts(OPTS, opt_group) ceilometer-6.0.0/ceilometer/api/controllers/0000775000567000056710000000000012701406364022260 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/api/controllers/root.py0000664000567000056710000000355112701406223023613 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from ceilometer.api.controllers.v2 import root as v2 MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' class RootController(object): def __init__(self): self.v2 = v2.V2Controller() @pecan.expose('json') def index(self): base_url = pecan.request.application_url available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] collected = [version_descriptor(base_url, v['tag'], v['date']) for v in available] versions = {'versions': {'values': collected}} return versions def version_descriptor(base_url, version, released_on): url = version_url(base_url, version) return { 'id': version, 'links': [ {'href': url, 'rel': 'self', }, {'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', }], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], 'status': 'stable', 'updated': released_on, } def version_url(base_url, version_number): return '%s/%s' % (base_url, version_number) ceilometer-6.0.0/ceilometer/api/controllers/__init__.py0000664000567000056710000000000012701406223024351 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/api/controllers/v2/0000775000567000056710000000000012701406364022607 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/api/controllers/v2/resources.py0000664000567000056710000001335112701406224025171 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
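Put together, RootController.index() above returns a discovery document like the following, abridged here to a single link and the JSON media type, with an illustrative host:

import json

base_url = 'http://localhost:8777'
doc = {'versions': {'values': [{
    'id': 'v2',
    'links': [{'href': base_url + '/v2', 'rel': 'self'}],
    'media-types': [{'base': 'application/json',
                     'type': 'application/vnd.openstack.telemetry-v2+json'}],
    'status': 'stable',
    'updated': '2013-02-13T00:00:00Z',
}]}}
print(json.dumps(doc, indent=2))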
See the # License for the specific language governing permissions and limitations # under the License. import datetime import pecan from pecan import rest import six from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils from ceilometer.api import rbac from ceilometer.i18n import _ class Resource(base.Base): """An externally defined object for which samples have been received.""" resource_id = wtypes.text "The unique identifier for the resource" project_id = wtypes.text "The ID of the owning project or tenant" user_id = wtypes.text "The ID of the user who created the resource or updated it last" first_sample_timestamp = datetime.datetime "UTC date & time not later than the first sample known for this resource" last_sample_timestamp = datetime.datetime "UTC date & time not earlier than the last sample known for this resource" metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the resource" links = [base.Link] "A list containing a self link and associated meter links" source = wtypes.text "The source where the resource come from" def __init__(self, metadata=None, **kwds): metadata = metadata or {} metadata = utils.flatten_metadata(metadata) super(Resource, self).__init__(metadata=metadata, **kwds) @classmethod def sample(cls): return cls( resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', timestamp=datetime.datetime.utcnow(), source="openstack", metadata={'name1': 'value1', 'name2': 'value2'}, links=[ base.Link(href=('http://localhost:8777/v2/resources/' 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='self'), base.Link(href=('http://localhost:8777/v2/meters/volume?' 'q.field=resource_id&q.value=' 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='volume') ], ) class ResourcesController(rest.RestController): """Works on resources.""" @staticmethod def _make_link(rel_name, url, type, type_arg, query=None): query_str = '' if query: query_str = '?q.field=%s&q.value=%s' % (query['field'], query['value']) return base.Link(href='%s/v2/%s/%s%s' % (url, type, type_arg, query_str), rel=rel_name) def _resource_links(self, resource_id, meter_links=1): links = [self._make_link('self', pecan.request.application_url, 'resources', resource_id)] if meter_links: for meter in pecan.request.storage_conn.get_meters( resource=resource_id): query = {'field': 'resource_id', 'value': resource_id} links.append(self._make_link(meter.name, pecan.request.application_url, 'meters', meter.name, query=query)) return links @wsme_pecan.wsexpose(Resource, six.text_type) def get_one(self, resource_id): """Retrieve details about one resource. :param resource_id: The UUID of the resource. """ rbac.enforce('get_resource', pecan.request) authorized_project = rbac.get_limited_to_project(pecan.request.headers) resources = list(pecan.request.storage_conn.get_resources( resource=resource_id, project=authorized_project)) if not resources: raise base.EntityNotFound(_('Resource'), resource_id) return Resource.from_db_and_links(resources[0], self._resource_links(resource_id)) @wsme_pecan.wsexpose([Resource], [base.Query], int, int) def get_all(self, q=None, limit=None, meter_links=1): """Retrieve definitions of all of the resources. :param q: Filter rules for the resources to be returned. 
:param meter_links: option to include related meter links """ rbac.enforce('get_resources', pecan.request) q = q or [] limit = utils.enforce_limit(limit) kwargs = utils.query_to_kwargs( q, pecan.request.storage_conn.get_resources, ['limit']) resources = [ Resource.from_db_and_links(r, self._resource_links(r.resource_id, meter_links)) for r in pecan.request.storage_conn.get_resources(limit=limit, **kwargs)] return resources ceilometer-6.0.0/ceilometer/api/controllers/v2/meters.py0000664000567000056710000004421012701406223024453 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import datetime from oslo_config import cfg from oslo_context import context from oslo_log import log from oslo_utils import strutils from oslo_utils import timeutils import pecan from pecan import rest import six import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer.publisher import utils as publisher_utils from ceilometer import sample from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) class OldSample(base.Base): """A single measurement for a given meter and resource. This class is deprecated in favor of Sample. """ source = wtypes.text "The ID of the source that identifies where the sample comes from" counter_name = wsme.wsattr(wtypes.text, mandatory=True) "The name of the meter" # FIXME(dhellmann): Make this meter_name? counter_type = wsme.wsattr(wtypes.text, mandatory=True) "The type of the meter (see :ref:`measurements`)" # FIXME(dhellmann): Make this meter_type? counter_unit = wsme.wsattr(wtypes.text, mandatory=True) "The unit of measure for the value in counter_volume" # FIXME(dhellmann): Make this meter_unit? counter_volume = wsme.wsattr(float, mandatory=True) "The actual measured value" user_id = wtypes.text "The ID of the user who last triggered an update to the resource" project_id = wtypes.text "The ID of the project or tenant that owns the resource" resource_id = wsme.wsattr(wtypes.text, mandatory=True) "The ID of the :class:`Resource` for which the measurements are taken" timestamp = datetime.datetime "UTC date and time when the measurement was made" recorded_at = datetime.datetime "When the sample has been recorded." 
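The link construction in ResourcesController._make_link above is plain string interpolation; a standalone sketch with an illustrative host and resource ID:

def make_link(rel_name, url, type_, type_arg, query=None):
    query_str = ''
    if query:
        query_str = '?q.field=%s&q.value=%s' % (query['field'],
                                                query['value'])
    return {'rel': rel_name,
            'href': '%s/v2/%s/%s%s' % (url, type_, type_arg, query_str)}

rid = 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'
print(make_link('self', 'http://localhost:8777', 'resources', rid))
print(make_link('volume', 'http://localhost:8777', 'meters', 'volume',
                query={'field': 'resource_id', 'value': rid}))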
resource_metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the resource" message_id = wtypes.text "A unique identifier for the sample" def __init__(self, counter_volume=None, resource_metadata=None, timestamp=None, **kwds): resource_metadata = resource_metadata or {} if counter_volume is not None: counter_volume = float(counter_volume) resource_metadata = v2_utils.flatten_metadata(resource_metadata) # this is to make it easier for clients to pass a timestamp in if timestamp and isinstance(timestamp, six.string_types): timestamp = timeutils.parse_isotime(timestamp) super(OldSample, self).__init__(counter_volume=counter_volume, resource_metadata=resource_metadata, timestamp=timestamp, **kwds) if self.resource_metadata in (wtypes.Unset, None): self.resource_metadata = {} @classmethod def sample(cls): return cls(source='openstack', counter_name='instance', counter_type='gauge', counter_unit='instance', counter_volume=1, resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', recorded_at=datetime.datetime.utcnow(), timestamp=datetime.datetime.utcnow(), resource_metadata={'name1': 'value1', 'name2': 'value2'}, message_id='5460acce-4fd6-480d-ab18-9735ec7b1996', ) class Statistics(base.Base): """Computed statistics for a query.""" groupby = {wtypes.text: wtypes.text} "Dictionary of field names for group, if groupby statistics are requested" unit = wtypes.text "The unit type of the data set" min = float "The minimum volume seen in the data" max = float "The maximum volume seen in the data" avg = float "The average of all of the volume values seen in the data" sum = float "The total of all of the volume values seen in the data" count = int "The number of samples seen" aggregate = {wtypes.text: float} "The selectable aggregate value(s)" duration = float "The difference, in seconds, between the oldest and newest timestamp" duration_start = datetime.datetime "UTC date and time of the earliest timestamp, or the query start time" duration_end = datetime.datetime "UTC date and time of the oldest timestamp, or the query end time" period = int "The difference, in seconds, between the period start and end" period_start = datetime.datetime "UTC date and time of the period start" period_end = datetime.datetime "UTC date and time of the period end" def __init__(self, start_timestamp=None, end_timestamp=None, **kwds): super(Statistics, self).__init__(**kwds) self._update_duration(start_timestamp, end_timestamp) def _update_duration(self, start_timestamp, end_timestamp): # "Clamp" the timestamps we return to the original time # range, excluding the offset. if (start_timestamp and self.duration_start and self.duration_start < start_timestamp): self.duration_start = start_timestamp LOG.debug('clamping min timestamp to range') if (end_timestamp and self.duration_end and self.duration_end > end_timestamp): self.duration_end = end_timestamp LOG.debug('clamping max timestamp to range') # If we got valid timestamps back, compute a duration in seconds. # # If the min > max after clamping then we know the # timestamps on the samples fell outside of the time # range we care about for the query, so treat them as # "invalid." # # If the timestamps are invalid, return None as a # sentinel indicating that there is something "funny" # about the range. 
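The clamping performed by _update_duration() above can be sketched standalone: durations are trimmed to the caller's [start, end] window, and an inverted window after clamping yields the None sentinels described in the comment.

import datetime

def clamp(duration_start, duration_end, start, end):
    if start and duration_start and duration_start < start:
        duration_start = start
    if end and duration_end and duration_end > end:
        duration_end = end
    if duration_start and duration_end and duration_start <= duration_end:
        seconds = (duration_end - duration_start).total_seconds()
        return duration_start, duration_end, seconds
    return None, None, None

t = datetime.datetime(2013, 1, 4, 16, 0)
print(clamp(t, t + datetime.timedelta(hours=2),      # samples span 16:00-18:00
            t + datetime.timedelta(hours=1), None))  # query starts at 17:00
# -> clamped to 17:00-18:00, duration 3600.0 seconds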
if (self.duration_start and self.duration_end and self.duration_start <= self.duration_end): self.duration = timeutils.delta_seconds(self.duration_start, self.duration_end) else: self.duration_start = self.duration_end = self.duration = None @classmethod def sample(cls): return cls(unit='GiB', min=1, max=9, avg=4.5, sum=45, count=10, duration_start=datetime.datetime(2013, 1, 4, 16, 42), duration_end=datetime.datetime(2013, 1, 4, 16, 47), period=7200, period_start=datetime.datetime(2013, 1, 4, 16, 00), period_end=datetime.datetime(2013, 1, 4, 18, 00), ) class Aggregate(base.Base): func = wsme.wsattr(wtypes.text, mandatory=True) "The aggregation function name" param = wsme.wsattr(wtypes.text, default=None) "The paramter to the aggregation function" def __init__(self, **kwargs): super(Aggregate, self).__init__(**kwargs) @staticmethod def validate(aggregate): return aggregate @classmethod def sample(cls): return cls(func='cardinality', param='resource_id') def _validate_groupby_fields(groupby_fields): """Checks that the list of groupby fields from request is valid. If all fields are valid, returns fields with duplicates removed. """ # NOTE(terriyu): Currently, metadata fields are supported in our # group by statistics implementation only for mongodb valid_fields = set(['user_id', 'resource_id', 'project_id', 'source', 'resource_metadata.instance_type']) invalid_fields = set(groupby_fields) - valid_fields if invalid_fields: raise wsme.exc.UnknownArgument(invalid_fields, "Invalid groupby fields") # Remove duplicate fields # NOTE(terriyu): This assumes that we don't care about the order of the # group by fields. return list(set(groupby_fields)) class MeterController(rest.RestController): """Manages operations on a single meter.""" _custom_actions = { 'statistics': ['GET'], } def __init__(self, meter_name): pecan.request.context['meter_name'] = meter_name self.meter_name = meter_name @wsme_pecan.wsexpose([OldSample], [base.Query], int) def get_all(self, q=None, limit=None): """Return samples for the meter. :param q: Filter rules for the data to be returned. :param limit: Maximum number of samples to return. """ rbac.enforce('get_samples', pecan.request) q = q or [] limit = v2_utils.enforce_limit(limit) kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) kwargs['meter'] = self.meter_name f = storage.SampleFilter(**kwargs) return [OldSample.from_db_model(e) for e in pecan.request.storage_conn.get_samples(f, limit=limit) ] @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201) def post(self, direct='', samples=None): """Post a list of new Samples to Telemetry. :param direct: a flag indicates whether the samples will be posted directly to storage or not. :param samples: a list of samples within the request body. 
""" rbac.enforce('create_samples', pecan.request) direct = strutils.bool_from_string(direct) if not samples: msg = _('Samples should be included in request body') raise base.ClientSideError(msg) now = timeutils.utcnow() auth_project = rbac.get_limited_to_project(pecan.request.headers) def_source = pecan.request.cfg.sample_source def_project_id = pecan.request.headers.get('X-Project-Id') def_user_id = pecan.request.headers.get('X-User-Id') published_samples = [] for s in samples: if self.meter_name != s.counter_name: raise wsme.exc.InvalidInput('counter_name', s.counter_name, 'should be %s' % self.meter_name) if s.message_id: raise wsme.exc.InvalidInput('message_id', s.message_id, 'The message_id must not be set') if s.counter_type not in sample.TYPES: raise wsme.exc.InvalidInput('counter_type', s.counter_type, 'The counter type must be: ' + ', '.join(sample.TYPES)) s.user_id = (s.user_id or def_user_id) s.project_id = (s.project_id or def_project_id) s.source = '%s:%s' % (s.project_id, (s.source or def_source)) s.timestamp = (s.timestamp or now) if auth_project and auth_project != s.project_id: # non admin user trying to cross post to another project_id auth_msg = 'can not post samples to other projects' raise wsme.exc.InvalidInput('project_id', s.project_id, auth_msg) published_sample = sample.Sample( name=s.counter_name, type=s.counter_type, unit=s.counter_unit, volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp.isoformat(), resource_metadata=utils.restore_nesting(s.resource_metadata, separator='.'), source=s.source) s.message_id = published_sample.id sample_dict = publisher_utils.meter_message_from_counter( published_sample, cfg.CONF.publisher.telemetry_secret) if direct: ts = timeutils.parse_isotime(sample_dict['timestamp']) sample_dict['timestamp'] = timeutils.normalize_time(ts) pecan.request.storage_conn.record_metering_data(sample_dict) else: published_samples.append(sample_dict) if not direct: ctxt = context.RequestContext(user=def_user_id, tenant=def_project_id, is_admin=True) notifier = pecan.request.notifier notifier.sample(ctxt.to_dict(), 'telemetry.api', {'samples': published_samples}) return samples @wsme_pecan.wsexpose([Statistics], [base.Query], [six.text_type], int, [Aggregate]) def statistics(self, q=None, groupby=None, period=None, aggregate=None): """Computes the statistics of the samples in the time range given. :param q: Filter rules for the data to be returned. :param groupby: Fields for group by aggregation :param period: Returned result will be an array of statistics for a period long of that number of seconds. :param aggregate: The selectable aggregation functions to be applied. """ rbac.enforce('compute_statistics', pecan.request) q = q or [] groupby = groupby or [] aggregate = aggregate or [] if period and period < 0: raise base.ClientSideError(_("Period must be positive.")) kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) kwargs['meter'] = self.meter_name f = storage.SampleFilter(**kwargs) g = _validate_groupby_fields(groupby) aggregate = utils.uniq(aggregate, ['func', 'param']) # Find the original timestamp in the query to use for clamping # the duration returned in the statistics. 
start = end = None for i in q: if i.field == 'timestamp' and i.op in ('lt', 'le'): end = timeutils.parse_isotime(i.value).replace( tzinfo=None) elif i.field == 'timestamp' and i.op in ('gt', 'ge'): start = timeutils.parse_isotime(i.value).replace( tzinfo=None) try: computed = pecan.request.storage_conn.get_meter_statistics( f, period, g, aggregate) return [Statistics(start_timestamp=start, end_timestamp=end, **c.as_dict()) for c in computed] except OverflowError as e: params = dict(period=period, err=e) raise base.ClientSideError( _("Invalid period %(period)s: %(err)s") % params) class Meter(base.Base): """One category of measurements.""" name = wtypes.text "The unique name for the meter" type = wtypes.Enum(str, *sample.TYPES) "The meter type (see :ref:`measurements`)" unit = wtypes.text "The unit of measure" resource_id = wtypes.text "The ID of the :class:`Resource` for which the measurements are taken" project_id = wtypes.text "The ID of the project or tenant that owns the resource" user_id = wtypes.text "The ID of the user who last triggered an update to the resource" source = wtypes.text "The ID of the source that identifies where the meter comes from" meter_id = wtypes.text "The unique identifier for the meter" def __init__(self, **kwargs): meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name']) # meter_id is of type Unicode but base64.encodestring() only accepts # strings. See bug #1333177 meter_id = base64.b64encode(meter_id.encode('utf-8')) kwargs['meter_id'] = meter_id super(Meter, self).__init__(**kwargs) @classmethod def sample(cls): return cls(name='instance', type='gauge', unit='instance', resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', source='openstack', ) class MetersController(rest.RestController): """Works on meters.""" @pecan.expose() def _lookup(self, meter_name, *remainder): return MeterController(meter_name), remainder @wsme_pecan.wsexpose([Meter], [base.Query], int, str) def get_all(self, q=None, limit=None, unique=''): """Return all known meters, based on the data recorded so far. :param q: Filter rules for the meters to be returned. :param unique: flag to indicate unique meters to be returned. """ rbac.enforce('get_meters', pecan.request) q = q or [] # Timestamp field is not supported for Meter queries limit = v2_utils.enforce_limit(limit) kwargs = v2_utils.query_to_kwargs( q, pecan.request.storage_conn.get_meters, ['limit'], allow_timestamps=False) return [Meter.from_db_model(m) for m in pecan.request.storage_conn.get_meters( limit=limit, unique=strutils.bool_from_string(unique), **kwargs)] ceilometer-6.0.0/ceilometer/api/controllers/v2/utils.py0000664000567000056710000003357412701406223024327 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
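How the opaque meter_id built in Meter.__init__ above round-trips (values illustrative):

import base64

resource_id = 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'
name = 'instance'
raw = '%s+%s' % (resource_id, name)
meter_id = base64.b64encode(raw.encode('utf-8')).decode('ascii')
print(meter_id)
print(base64.b64decode(meter_id).decode('utf-8'))  # round-trips to raw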
See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import functools import inspect from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pecan import six import wsme from ceilometer.api.controllers.v2 import base from ceilometer.api import rbac from ceilometer.i18n import _, _LI from ceilometer import utils LOG = log.getLogger(__name__) cfg.CONF.import_opt('default_api_return_limit', 'ceilometer.api.app', group='api') def enforce_limit(limit): """Ensure limit is defined and is valid. if not, set a default.""" if limit is None: limit = cfg.CONF.api.default_api_return_limit LOG.info(_LI('No limit value provided, result set will be' ' limited to %(limit)d.'), {'limit': limit}) if not limit or limit <= 0: raise base.ClientSideError(_("Limit must be positive")) return limit def get_auth_project(on_behalf_of=None): auth_project = rbac.get_limited_to_project(pecan.request.headers) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project def sanitize_query(query, db_func, on_behalf_of=None): """Check the query. See if: 1) the request is coming from admin - then allow full visibility 2) non-admin - make sure that the query includes the requester's project. """ q = copy.copy(query) auth_project = get_auth_project(on_behalf_of) if auth_project: _verify_query_segregation(q, auth_project) proj_q = [i for i in q if i.field == 'project_id'] valid_keys = inspect.getargspec(db_func)[0] if not proj_q and 'on_behalf_of' not in valid_keys: # The user is restricted, but they didn't specify a project # so add it for them. q.append(base.Query(field='project_id', op='eq', value=auth_project)) return q def _verify_query_segregation(query, auth_project=None): """Ensure non-admin queries are not constrained to another project.""" auth_project = (auth_project or rbac.get_limited_to_project(pecan.request.headers)) if not auth_project: return for q in query: if q.field in ('project', 'project_id') and auth_project != q.value: raise base.ProjectNotAuthorized(q.value) def validate_query(query, db_func, internal_keys=None, allow_timestamps=True): """Validates the syntax of the query and verifies the query. Verification check if the query request is authorized for the included project. 
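The tenant scoping applied by sanitize_query() above amounts to appending a project_id constraint for restricted callers; a simplified sketch that ignores the on_behalf_of corner case, with Query as a stand-in type:

import collections

Query = collections.namedtuple('Query', 'field op value')

def sanitize(query, auth_project):
    q = list(query)
    if auth_project and not any(i.field == 'project_id' for i in q):
        q.append(Query('project_id', 'eq', auth_project))
    return q

print(sanitize([Query('resource_id', 'eq', 'res-1')], 'tenant-a'))
# the restricted caller's own project is appended for them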
:param query: Query expression that should be validated :param db_func: the function on the storage level, of which arguments will form the valid_keys list, which defines the valid fields for a query expression :param internal_keys: internally used field names, that should not be used for querying :param allow_timestamps: defines whether the timestamp-based constraint is applicable for this query or not :raises InvalidInput: if an operator is not supported for a given field :raises InvalidInput: if timestamp constraints are allowed, but search_offset was included without timestamp constraint :raises: UnknownArgument: if a field name is not a timestamp field, nor in the list of valid keys """ internal_keys = internal_keys or [] _verify_query_segregation(query) valid_keys = inspect.getargspec(db_func)[0] internal_timestamp_keys = ['end_timestamp', 'start_timestamp', 'end_timestamp_op', 'start_timestamp_op'] if 'start_timestamp' in valid_keys: internal_keys += internal_timestamp_keys valid_keys += ['timestamp', 'search_offset'] internal_keys.append('self') internal_keys.append('metaquery') valid_keys = set(valid_keys) - set(internal_keys) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} has_timestamp_query = _validate_timestamp_fields(query, 'timestamp', ('lt', 'le', 'gt', 'ge'), allow_timestamps) has_search_offset_query = _validate_timestamp_fields(query, 'search_offset', 'eq', allow_timestamps) if has_search_offset_query and not has_timestamp_query: raise wsme.exc.InvalidInput('field', 'search_offset', "search_offset cannot be used without " + "timestamp") def _is_field_metadata(field): return (field.startswith('metadata.') or field.startswith('resource_metadata.')) for i in query: if i.field not in ('timestamp', 'search_offset'): key = translation.get(i.field, i.field) operator = i.op if key in valid_keys or _is_field_metadata(i.field): if operator == 'eq': if key == 'enabled': i._get_value_as_type('boolean') elif _is_field_metadata(key): i._get_value_as_type() else: raise wsme.exc.InvalidInput('op', i.op, 'unimplemented operator for ' '%s' % i.field) else: msg = ("unrecognized field in query: %s, " "valid keys: %s") % (query, sorted(valid_keys)) raise wsme.exc.UnknownArgument(key, msg) def _validate_timestamp_fields(query, field_name, operator_list, allow_timestamps): """Validates the timestamp related constraints in a query if there are any. :param query: query expression that may contain the timestamp fields :param field_name: timestamp name, which should be checked (timestamp, search_offset) :param operator_list: list of operators that are supported for that timestamp, which was specified in the parameter field_name :param allow_timestamps: defines whether the timestamp-based constraint is applicable to this query or not :returns: True, if there was a timestamp constraint, containing a timestamp field named as defined in field_name, in the query and it was allowed and syntactically correct. :returns: False, if there wasn't timestamp constraint, containing a timestamp field named as defined in field_name, in the query :raises InvalidInput: if an operator is unsupported for a given timestamp field :raises UnknownArgument: if the timestamp constraint is not allowed in the query """ for item in query: if item.field == field_name: # If *timestamp* or *search_offset* field was specified in the # query, but timestamp is not supported on that resource, on # which the query was invoked, then raise an exception. 
if not allow_timestamps: raise wsme.exc.UnknownArgument(field_name, "not valid for " + "this resource") if item.op not in operator_list: raise wsme.exc.InvalidInput('op', item.op, 'unimplemented operator for %s' % item.field) return True return False def query_to_kwargs(query, db_func, internal_keys=None, allow_timestamps=True): validate_query(query, db_func, internal_keys=internal_keys, allow_timestamps=allow_timestamps) query = sanitize_query(query, db_func) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} stamp = {} metaquery = {} kwargs = {} for i in query: if i.field == 'timestamp': if i.op in ('lt', 'le'): stamp['end_timestamp'] = i.value stamp['end_timestamp_op'] = i.op elif i.op in ('gt', 'ge'): stamp['start_timestamp'] = i.value stamp['start_timestamp_op'] = i.op else: if i.op == 'eq': if i.field == 'search_offset': stamp['search_offset'] = i.value elif i.field == 'enabled': kwargs[i.field] = i._get_value_as_type('boolean') elif i.field.startswith('metadata.'): metaquery[i.field] = i._get_value_as_type() elif i.field.startswith('resource_metadata.'): metaquery[i.field[9:]] = i._get_value_as_type() else: key = translation.get(i.field, i.field) kwargs[key] = i.value if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]: kwargs['metaquery'] = metaquery if stamp: kwargs.update(_get_query_timestamps(stamp)) return kwargs def _get_query_timestamps(args=None): """Return any optional timestamp information in the request. Determine the desired range, if any, from the GET arguments. Set up the query range using the specified offset. [query_start ... start_timestamp ... end_timestamp ... query_end] Returns a dictionary containing: start_timestamp: First timestamp to use for query start_timestamp_op: First timestamp operator to use for query end_timestamp: Final timestamp to use for query end_timestamp_op: Final timestamp operator to use for query """ if args is None: return {} search_offset = int(args.get('search_offset', 0)) def _parse_timestamp(timestamp): if not timestamp: return None try: iso_timestamp = timeutils.parse_isotime(timestamp) iso_timestamp = iso_timestamp.replace(tzinfo=None) except ValueError: raise wsme.exc.InvalidInput('timestamp', timestamp, 'invalid timestamp format') return iso_timestamp start_timestamp = _parse_timestamp(args.get('start_timestamp')) end_timestamp = _parse_timestamp(args.get('end_timestamp')) start_timestamp = start_timestamp - datetime.timedelta( minutes=search_offset) if start_timestamp else None end_timestamp = end_timestamp + datetime.timedelta( minutes=search_offset) if end_timestamp else None return {'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp, 'start_timestamp_op': args.get('start_timestamp_op'), 'end_timestamp_op': args.get('end_timestamp_op')} def flatten_metadata(metadata): """Return flattened resource metadata. Metadata is returned with flattened nested structures (except nested sets) and with all values converted to unicode strings. """ if metadata: # After changing recursive_keypairs` output we need to keep # flattening output unchanged. 
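The search_offset handling in _get_query_timestamps() above widens the requested window by N minutes on each side:

import datetime

def widen(start, end, search_offset):
    delta = datetime.timedelta(minutes=search_offset)
    return (start - delta if start else None,
            end + delta if end else None)

start = datetime.datetime(2013, 1, 4, 16, 0)
end = datetime.datetime(2013, 1, 4, 18, 0)
print(widen(start, end, 10))
# (datetime(2013, 1, 4, 15, 50), datetime(2013, 1, 4, 18, 10))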
# Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.') # output before: a.b:c=d # output now: a.b.c=d # So to keep the first variant just replace all dots except the first return dict((k.replace('.', ':').replace(':', '.', 1), six.text_type(v)) for k, v in utils.recursive_keypairs(metadata, separator='.') if type(v) is not set) return {} # TODO(fabiog): this decorator should disappear and have a more unified # way of controlling access and scope. Before messing with this, though # I feel this file should be re-factored in smaller chunks one for each # controller (e.g. meters and so on ...). Right now its size is # overwhelming. def requires_admin(func): @functools.wraps(func) def wrapped(*args, **kwargs): usr_limit, proj_limit = rbac.get_limited_to(pecan.request.headers) # If User and Project are None, you have full access. if usr_limit and proj_limit: # since this decorator get's called out of wsme context # raising exception results internal error so call abort # for handling the error ex = base.ProjectNotAuthorized(proj_limit) pecan.core.abort(status_code=ex.code, detail=ex.msg) return func(*args, **kwargs) return wrapped def requires_context(func): @functools.wraps(func) def wrapped(*args, **kwargs): req_usr = pecan.request.headers.get('X-User-Id') proj_usr = pecan.request.headers.get('X-Project-Id') if ((not req_usr) or (not proj_usr)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') return func(*args, **kwargs) return wrapped ceilometer-6.0.0/ceilometer/api/controllers/v2/root.py0000664000567000056710000001671112701406223024144 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import strutils import pecan from ceilometer.api.controllers.v2 import capabilities from ceilometer.api.controllers.v2 import events from ceilometer.api.controllers.v2 import meters from ceilometer.api.controllers.v2 import query from ceilometer.api.controllers.v2 import resources from ceilometer.api.controllers.v2 import samples from ceilometer.i18n import _, _LW from ceilometer import keystone_client API_OPTS = [ cfg.BoolOpt('gnocchi_is_enabled', default=None, help=('Set True to disable resource/meter/sample URLs. ' 'Default autodetection by querying keystone.')), cfg.BoolOpt('aodh_is_enabled', default=None, help=('Set True to redirect alarms URLs to aodh. ' 'Default autodetection by querying keystone.')), cfg.StrOpt('aodh_url', default=None, help=('The endpoint of Aodh to redirect alarms URLs ' 'to Aodh API. 
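The key rewriting in flatten_metadata() above keeps only the first '.' and turns the rest into ':'; a standalone sketch with recursive_keypairs approximated:

def recursive_keypairs(d, separator='.', prefix=''):
    # approximation of ceilometer.utils.recursive_keypairs
    for k, v in sorted(d.items()):
        key = prefix + separator + k if prefix else k
        if isinstance(v, dict):
            for pair in recursive_keypairs(v, separator, key):
                yield pair
        else:
            yield key, v

def flatten_metadata(metadata):
    return dict((k.replace('.', ':').replace(':', '.', 1), str(v))
                for k, v in recursive_keypairs(metadata, separator='.')
                if not isinstance(v, set))

print(flatten_metadata({'a': {'b': {'c': 'd'}}}))  # {'a.b:c': 'd'}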
Default autodetection by querying ' 'keystone.')), ] cfg.CONF.register_opts(API_OPTS, group='api') cfg.CONF.import_opt('meter_dispatchers', 'ceilometer.dispatcher') LOG = log.getLogger(__name__) def gnocchi_abort(): pecan.abort(410, ("This telemetry installation is configured to use " "Gnocchi. Please use the Gnocchi API available on " "the metric endpoint to retrieve data.")) def aodh_abort(): pecan.abort(410, _("alarms URLs is unavailable when Aodh is " "disabled or unavailable.")) def aodh_redirect(url): # NOTE(sileht): we use 307 and not 301 or 302 to allow # client to redirect POST/PUT/DELETE/... # FIXME(sileht): it would be better to use 308, but webob # doesn't handle it :( # https://github.com/Pylons/webob/pull/207 pecan.redirect(location=url + pecan.request.path_qs, code=307) class QueryController(object): def __init__(self, gnocchi_is_enabled=False, aodh_url=None): self.gnocchi_is_enabled = gnocchi_is_enabled self.aodh_url = aodh_url @pecan.expose() def _lookup(self, kind, *remainder): if kind == 'alarms' and self.aodh_url: aodh_redirect(self.aodh_url) elif kind == 'alarms': aodh_abort() elif kind == 'samples' and self.gnocchi_is_enabled: gnocchi_abort() elif kind == 'samples': return query.QuerySamplesController(), remainder else: pecan.abort(404) class V2Controller(object): """Version 2 API controller root.""" event_types = events.EventTypesController() events = events.EventsController() capabilities = capabilities.CapabilitiesController() def __init__(self): self._gnocchi_is_enabled = None self._aodh_is_enabled = None self._aodh_url = None @property def gnocchi_is_enabled(self): if self._gnocchi_is_enabled is None: if cfg.CONF.api.gnocchi_is_enabled is not None: self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled elif ("gnocchi" not in cfg.CONF.meter_dispatchers or "database" in cfg.CONF.meter_dispatchers): self._gnocchi_is_enabled = False else: try: catalog = keystone_client.get_service_catalog( keystone_client.get_client()) catalog.url_for(service_type='metric') except exceptions.EndpointNotFound: self._gnocchi_is_enabled = False except exceptions.ClientException: LOG.warning(_LW("Can't connect to keystone, assuming " "gnocchi is disabled and retry later")) else: self._gnocchi_is_enabled = True LOG.warning(_LW("ceilometer-api started with gnocchi " "enabled. The resources/meters/samples " "URLs are disabled.")) return self._gnocchi_is_enabled @property def aodh_url(self): if self._aodh_url is None: if cfg.CONF.api.aodh_is_enabled is False: self._aodh_url = "" elif cfg.CONF.api.aodh_url is not None: self._aodh_url = self._normalize_aodh_url( cfg.CONF.api.aodh_url) else: try: catalog = keystone_client.get_service_catalog( keystone_client.get_client()) self._aodh_url = self._normalize_aodh_url( catalog.url_for(service_type='alarming')) except exceptions.EndpointNotFound: self._aodh_url = "" except exceptions.ClientException: LOG.warning(_LW("Can't connect to keystone, assuming aodh " "is disabled and retry later.")) else: LOG.warning(_LW("ceilometer-api started with aodh " "enabled. 
Alarms URLs will be redirected " "to aodh endpoint.")) return self._aodh_url @pecan.expose() def _lookup(self, kind, *remainder): if (kind in ['meters', 'resources', 'samples'] and self.gnocchi_is_enabled): if kind == 'meters' and pecan.request.method == 'POST': direct = pecan.request.params.get('direct', '') if strutils.bool_from_string(direct): pecan.abort(400, _('direct option cannot be true when ' 'Gnocchi is enabled.')) return meters.MetersController(), remainder gnocchi_abort() elif kind == 'meters': return meters.MetersController(), remainder elif kind == 'resources': return resources.ResourcesController(), remainder elif kind == 'samples': return samples.SamplesController(), remainder elif kind == 'query': return QueryController( gnocchi_is_enabled=self.gnocchi_is_enabled, aodh_url=self.aodh_url, ), remainder elif kind == 'alarms' and (not self.aodh_url): aodh_abort() elif kind == 'alarms' and self.aodh_url: aodh_redirect(self.aodh_url) else: pecan.abort(404) @staticmethod def _normalize_aodh_url(url): if url.endswith("/"): return url[:-1] return url ceilometer-6.0.0/ceilometer/api/controllers/v2/__init__.py0000664000567000056710000000000012701406223024700 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/api/controllers/v2/capabilities.py0000664000567000056710000001045612701406223025612 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer import utils def _flatten_capabilities(capabilities): return dict((k, v) for k, v in utils.recursive_keypairs(capabilities)) class Capabilities(base.Base): """A representation of the API and storage capabilities. Usually constrained by restrictions imposed by the storage driver. 
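The catalog probes behind gnocchi_is_enabled and aodh_url above follow one pattern; a rough sketch with keystoneauth1-style exceptions (client construction elided, names illustrative):

from keystoneauth1 import exceptions

def probe_service(catalog, service_type):
    try:
        return catalog.url_for(service_type=service_type)
    except exceptions.EndpointNotFound:
        return None   # service not deployed: treat as disabled
    except exceptions.ClientException:
        return None   # keystone unreachable: stay undecided, retry later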
""" api = {wtypes.text: bool} "A flattened dictionary of API capabilities" storage = {wtypes.text: bool} "A flattened dictionary of storage capabilities" event_storage = {wtypes.text: bool} "A flattened dictionary of event storage capabilities" @classmethod def sample(cls): return cls( api=_flatten_capabilities({ 'meters': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'resources': {'query': {'simple': True, 'metadata': True, 'complex': False}}, 'samples': {'query': {'simple': True, 'metadata': True, 'complex': True}}, 'statistics': {'groupby': True, 'query': {'simple': True, 'metadata': True, 'complex': False}, 'aggregation': {'standard': True, 'selectable': { 'max': True, 'min': True, 'sum': True, 'avg': True, 'count': True, 'stddev': True, 'cardinality': True, 'quartile': False}}}, 'events': {'query': {'simple': True}}, }), storage=_flatten_capabilities( {'storage': {'production_ready': True}}), event_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) class CapabilitiesController(rest.RestController): """Manages capabilities queries.""" @wsme_pecan.wsexpose(Capabilities) def get(self): """Returns a flattened dictionary of API capabilities. Capabilities supported by the currently configured storage driver. """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers conn = pecan.request.storage_conn event_conn = pecan.request.event_storage_conn driver_capabilities = conn.get_capabilities().copy() driver_capabilities['events'] = event_conn.get_capabilities()['events'] driver_perf = conn.get_storage_capabilities() event_driver_perf = event_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), storage=_flatten_capabilities(driver_perf), event_storage=_flatten_capabilities( event_driver_perf)) ceilometer-6.0.0/ceilometer/api/controllers/v2/events.py0000664000567000056710000002475312701406223024472 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from oslo_log import log import pecan from pecan import rest import six import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.event.storage import models as event_models from ceilometer.i18n import _ from ceilometer import storage LOG = log.getLogger(__name__) class TraitDescription(base.Base): """A description of a trait, with no associated value.""" type = wtypes.text "the data type, defaults to string" name = wtypes.text "the name of the trait" @classmethod def sample(cls): return cls(name='service', type='string' ) class EventQuery(base.Query): """Query arguments for Event Queries.""" _supported_types = ['integer', 'float', 'string', 'datetime'] type = wsme.wsattr(wtypes.text, default='string') "the type of the trait filter, defaults to string" def __repr__(self): # for logging calls return '' % (self.field, self.op, self._get_value_as_type(), self.type) @classmethod def sample(cls): return cls(field="event_type", type="string", op="eq", value="compute.instance.create.start") class Trait(base.Base): """A Trait associated with an event.""" name = wtypes.text "The name of the trait" value = wtypes.text "the value of the trait" type = wtypes.text "the type of the trait (string, integer, float or datetime)" @staticmethod def _convert_storage_trait(trait): """Helper method to convert a storage model into an API trait instance. If an API trait instance is passed in, just return it. """ if isinstance(trait, Trait): return trait value = (six.text_type(trait.value) if not trait.dtype == event_models.Trait.DATETIME_TYPE else trait.value.isoformat()) trait_type = event_models.Trait.get_name_by_type(trait.dtype) return Trait(name=trait.name, type=trait_type, value=value) @classmethod def sample(cls): return cls(name='service', type='string', value='compute.hostname' ) class Event(base.Base): """A System event.""" message_id = wtypes.text "The message ID for the notification" event_type = wtypes.text "The type of the event" _traits = None def get_traits(self): return self._traits def set_traits(self, traits): self._traits = map(Trait._convert_storage_trait, traits) traits = wsme.wsproperty(wtypes.ArrayType(Trait), get_traits, set_traits) "Event specific properties" generated = datetime.datetime "The time the event occurred" raw = base.JsonType() "The raw copy of notification" @classmethod def sample(cls): return cls( event_type='compute.instance.update', generated=datetime.datetime(2015, 1, 1, 12, 30, 59, 123456), message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0', traits={ Trait(name='request_id', value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'), Trait(name='service', value='conductor.tem-devstack-01'), Trait(name='tenant_id', value='7f13f2b17917463b9ee21aa92c4b36d6') }, raw={'status': {'nested': 'started'}} ) def _build_rbac_query_filters(): filters = {'t_filter': [], 'admin_proj': None} # Returns user_id, proj_id for non-admins user_id, proj_id = rbac.get_limited_to(pecan.request.headers) # If non-admin, filter events by user and project if user_id and proj_id: filters['t_filter'].append({"key": "project_id", "string": proj_id, "op": "eq"}) filters['t_filter'].append({"key": "user_id", "string": user_id, "op": "eq"}) elif not user_id and not proj_id: filters['admin_proj'] = pecan.request.headers.get('X-Project-Id') return filters def _event_query_to_event_filter(q): evt_model_filter = { 'event_type': 
None, 'message_id': None, 'start_timestamp': None, 'end_timestamp': None } filters = _build_rbac_query_filters() traits_filter = filters['t_filter'] admin_proj = filters['admin_proj'] for i in q: if not i.op: i.op = 'eq' elif i.op not in base.operation_kind: error = (_('Operator %(operator)s is not supported. The supported' ' operators are: %(supported)s') % {'operator': i.op, 'supported': base.operation_kind}) raise base.ClientSideError(error) if i.field in evt_model_filter: if i.op != 'eq': error = (_('Operator %(operator)s is not supported. Only' ' equality operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) evt_model_filter[i.field] = i.value else: trait_type = i.type or 'string' traits_filter.append({"key": i.field, trait_type: i._get_value_as_type(), "op": i.op}) return storage.EventFilter(traits_filter=traits_filter, admin_proj=admin_proj, **evt_model_filter) class TraitsController(rest.RestController): """Works on Event Traits.""" @v2_utils.requires_admin @wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text) def get_one(self, event_type, trait_name): """Return all instances of a trait for an event type. :param event_type: Event type to filter traits by :param trait_name: Trait to return values for """ LOG.debug("Getting traits for %s", event_type) return [Trait._convert_storage_trait(t) for t in pecan.request.event_storage_conn .get_traits(event_type, trait_name)] @v2_utils.requires_admin @wsme_pecan.wsexpose([TraitDescription], wtypes.text) def get_all(self, event_type): """Return all trait names for an event type. :param event_type: Event type to filter traits by """ get_trait_name = event_models.Trait.get_name_by_type return [TraitDescription(name=t['name'], type=get_trait_name(t['data_type'])) for t in pecan.request.event_storage_conn .get_trait_types(event_type)] class EventTypesController(rest.RestController): """Works on Event Types in the system.""" traits = TraitsController() @v2_utils.requires_admin @wsme_pecan.wsexpose(None, wtypes.text) def get_one(self, event_type): """Unused API, will always return 404. :param event_type: An event type """ pecan.abort(404) @v2_utils.requires_admin @wsme_pecan.wsexpose([six.text_type]) def get_all(self): """Get all event types.""" return list(pecan.request.event_storage_conn.get_event_types()) class EventsController(rest.RestController): """Works on Events.""" @v2_utils.requires_context @wsme_pecan.wsexpose([Event], [EventQuery], int) def get_all(self, q=None, limit=None): """Return all events matching the query filters. :param q: Filter arguments for which Events to return :param limit: Maximum number of events to be returned. """ rbac.enforce("events:index", pecan.request) q = q or [] limit = v2_utils.enforce_limit(limit) event_filter = _event_query_to_event_filter(q) return [Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) for event in pecan.request.event_storage_conn.get_events(event_filter, limit)] @v2_utils.requires_context @wsme_pecan.wsexpose(Event, wtypes.text) def get_one(self, message_id): """Return a single event with the given message id. 
:param message_id: Message ID of the Event to be returned """ rbac.enforce("events:show", pecan.request) filters = _build_rbac_query_filters() t_filter = filters['t_filter'] admin_proj = filters['admin_proj'] event_filter = storage.EventFilter(traits_filter=t_filter, admin_proj=admin_proj, message_id=message_id) events = [event for event in pecan.request.event_storage_conn.get_events(event_filter)] if not events: raise base.EntityNotFound(_("Event"), message_id) if len(events) > 1: LOG.error(_("More than one event with " "id %s returned from storage driver") % message_id) event = events[0] return Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) ceilometer-6.0.0/ceilometer/api/controllers/v2/query.py0000664000567000056710000003273212701406223024327 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import jsonschema from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import samples from ceilometer.api.controllers.v2 import utils as v2_utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer import storage from ceilometer import utils LOG = log.getLogger(__name__) class ComplexQuery(base.Base): """Holds a sample query encoded in json.""" filter = wtypes.text "The filter expression encoded in json." orderby = wtypes.text "List of single-element dicts for specifying the ordering of the results." limit = int "The maximum number of results to be returned." 
@classmethod def sample(cls): return cls(filter='{"and": [{"and": [{"=": ' + '{"counter_name": "cpu_util"}}, ' + '{">": {"counter_volume": 0.23}}, ' + '{"<": {"counter_volume": 0.26}}]}, ' + '{"or": [{"and": [{">": ' + '{"timestamp": "2013-12-01T18:00:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + '{"and": [{">": ' + '{"timestamp": "2013-12-01T18:30:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', orderby='[{"counter_volume": "ASC"}, ' + '{"timestamp": "DESC"}]', limit=42 ) def _list_to_regexp(items, regexp_prefix=""): regexp = ["^%s$" % item for item in items] regexp = regexp_prefix + "|".join(regexp) return regexp class ValidatedComplexQuery(object): complex_operators = ["and", "or"] order_directions = ["asc", "desc"] simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] regexp_prefix = "(?i)" complex_ops = _list_to_regexp(complex_operators, regexp_prefix) simple_ops = _list_to_regexp(simple_ops, regexp_prefix) order_directions = _list_to_regexp(order_directions, regexp_prefix) timestamp_fields = ["timestamp", "state_timestamp"] def __init__(self, query, db_model, additional_name_mapping=None, metadata_allowed=False): additional_name_mapping = additional_name_mapping or {} self.name_mapping = {"user": "user_id", "project": "project_id"} self.name_mapping.update(additional_name_mapping) valid_keys = db_model.get_field_names() valid_keys = list(valid_keys) + list(self.name_mapping.keys()) valid_fields = _list_to_regexp(valid_keys) if metadata_allowed: valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" else: valid_filter_fields = valid_fields schema_value = { "oneOf": [{"type": "string"}, {"type": "number"}, {"type": "boolean"}], "minProperties": 1, "maxProperties": 1} schema_value_in = { "type": "array", "items": {"oneOf": [{"type": "string"}, {"type": "number"}]}, "minItems": 1} schema_field = { "type": "object", "patternProperties": {valid_filter_fields: schema_value}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_field_in = { "type": "object", "patternProperties": {valid_filter_fields: schema_value_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_in = { "type": "object", "patternProperties": {"(?i)^in$": schema_field_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_simple_ops = { "type": "object", "patternProperties": {self.simple_ops: schema_field}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_and_or_array = { "type": "array", "items": {"$ref": "#"}, "minItems": 2} schema_and_or = { "type": "object", "patternProperties": {self.complex_ops: schema_and_or_array}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_not = { "type": "object", "patternProperties": {"(?i)^not$": {"$ref": "#"}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} self.schema = { "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, {"$ref": "#/definitions/leaf_in"}, {"$ref": "#/definitions/and_or"}, {"$ref": "#/definitions/not"}], "minProperties": 1, "maxProperties": 1, "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, "leaf_in": schema_leaf_in, "and_or": schema_and_or, "not": schema_not}} self.orderby_schema = { "type": "array", "items": { "type": "object", "patternProperties": {valid_fields: {"type": "string", "pattern": self.order_directions}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1}} self.original_query = query def 
validate(self, visibility_field): """Validates the query content and does the necessary conversions.""" if self.original_query.filter is wtypes.Unset: self.filter_expr = None else: try: self.filter_expr = json.loads(self.original_query.filter) self._validate_filter(self.filter_expr) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Filter expression not valid: %s") % e) self._replace_isotime_with_datetime(self.filter_expr) self._convert_operator_to_lower_case(self.filter_expr) self._normalize_field_names_for_db_model(self.filter_expr) self._force_visibility(visibility_field) if self.original_query.orderby is wtypes.Unset: self.orderby = None else: try: self.orderby = json.loads(self.original_query.orderby) self._validate_orderby(self.orderby) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Order-by expression not valid: %s") % e) self._convert_orderby_to_lower_case(self.orderby) self._normalize_field_names_in_orderby(self.orderby) self.limit = (None if self.original_query.limit is wtypes.Unset else self.original_query.limit) self.limit = v2_utils.enforce_limit(self.limit) @staticmethod def _convert_orderby_to_lower_case(orderby): for orderby_field in orderby: utils.lowercase_values(orderby_field) def _normalize_field_names_in_orderby(self, orderby): for orderby_field in orderby: self._replace_field_names(orderby_field) def _traverse_postorder(self, tree, visitor): op = list(tree.keys())[0] if op.lower() in self.complex_operators: for i, operand in enumerate(tree[op]): self._traverse_postorder(operand, visitor) if op.lower() == "not": self._traverse_postorder(tree[op], visitor) visitor(tree) def _check_cross_project_references(self, own_project_id, visibility_field): """Do not allow other than own_project_id.""" def check_project_id(subfilter): op, value = list(subfilter.items())[0] if (op.lower() not in self.complex_operators and list(value.keys())[0] == visibility_field and value[visibility_field] != own_project_id): raise base.ProjectNotAuthorized(value[visibility_field]) self._traverse_postorder(self.filter_expr, check_project_id) def _force_visibility(self, visibility_field): """Force visibility field. If the tenant is not admin insert an extra "and =" clause to the query. 
""" authorized_project = rbac.get_limited_to_project(pecan.request.headers) is_admin = authorized_project is None if not is_admin: self._restrict_to_project(authorized_project, visibility_field) self._check_cross_project_references(authorized_project, visibility_field) def _restrict_to_project(self, project_id, visibility_field): restriction = {"=": {visibility_field: project_id}} if self.filter_expr is None: self.filter_expr = restriction else: self.filter_expr = {"and": [restriction, self.filter_expr]} def _replace_isotime_with_datetime(self, filter_expr): def replace_isotime(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: field = list(value.keys())[0] if field in self.timestamp_fields: date_time = self._convert_to_datetime(subfilter[op][field]) subfilter[op][field] = date_time self._traverse_postorder(filter_expr, replace_isotime) def _normalize_field_names_for_db_model(self, filter_expr): def _normalize_field_names(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: self._replace_field_names(value) self._traverse_postorder(filter_expr, _normalize_field_names) def _replace_field_names(self, subfilter): field, value = list(subfilter.items())[0] if field in self.name_mapping: del subfilter[field] subfilter[self.name_mapping[field]] = value if field.startswith("metadata."): del subfilter[field] subfilter["resource_" + field] = value def _convert_operator_to_lower_case(self, filter_expr): self._traverse_postorder(filter_expr, utils.lowercase_keys) @staticmethod def _convert_to_datetime(isotime): try: date_time = timeutils.parse_isotime(isotime) date_time = date_time.replace(tzinfo=None) return date_time except ValueError: LOG.exception(_("String %s is not a valid isotime") % isotime) msg = _('Failed to parse the timestamp value %s') % isotime raise base.ClientSideError(msg) def _validate_filter(self, filter_expr): jsonschema.validate(filter_expr, self.schema) def _validate_orderby(self, orderby_expr): jsonschema.validate(orderby_expr, self.orderby_schema) class QuerySamplesController(rest.RestController): """Provides complex query possibilities for samples.""" @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery) def post(self, body): """Define query for retrieving Sample data. :param body: Query rules for the samples to be returned. """ rbac.enforce('query_sample', pecan.request) sample_name_mapping = {"resource": "resource_id", "meter": "counter_name", "type": "counter_type", "unit": "counter_unit", "volume": "counter_volume"} query = ValidatedComplexQuery(body, storage.models.Sample, sample_name_mapping, metadata_allowed=True) query.validate(visibility_field="project_id") conn = pecan.request.storage_conn return [samples.Sample.from_db_model(s) for s in conn.query_samples(query.filter_expr, query.orderby, query.limit)] class QueryController(rest.RestController): samples = QuerySamplesController() ceilometer-6.0.0/ceilometer/api/controllers/v2/base.py0000664000567000056710000002067412701406223024076 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import datetime import functools import inspect import json from oslo_utils import strutils from oslo_utils import timeutils import pecan import six import wsme from wsme import types as wtypes from ceilometer.i18n import _ operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') operation_kind_enum = wtypes.Enum(str, *operation_kind) class ClientSideError(wsme.exc.ClientSideError): def __init__(self, error, status_code=400): pecan.response.translatable_error = error super(ClientSideError, self).__init__(error, status_code) class EntityNotFound(ClientSideError): def __init__(self, entity, id): super(EntityNotFound, self).__init__( _("%(entity)s %(id)s Not Found") % {'entity': entity, 'id': id}, status_code=404) class ProjectNotAuthorized(ClientSideError): def __init__(self, id, aspect='project'): params = dict(aspect=aspect, id=id) super(ProjectNotAuthorized, self).__init__( _("Not Authorized to access %(aspect)s %(id)s") % params, status_code=401) class AdvEnum(wtypes.wsproperty): """Handle default and mandatory for wtypes.Enum.""" def __init__(self, name, *args, **kwargs): self._name = '_advenum_%s' % name self._default = kwargs.pop('default', None) mandatory = kwargs.pop('mandatory', False) enum = wtypes.Enum(*args, **kwargs) super(AdvEnum, self).__init__(datatype=enum, fget=self._get, fset=self._set, mandatory=mandatory) def _get(self, parent): if hasattr(parent, self._name): value = getattr(parent, self._name) return value or self._default return self._default def _set(self, parent, value): try: if self.datatype.validate(value): setattr(parent, self._name, value) except ValueError as e: raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), value, e) class Base(wtypes.DynamicBase): @classmethod def from_db_model(cls, m): return cls(**(m.as_dict())) @classmethod def from_db_and_links(cls, m, links): return cls(links=links, **(m.as_dict())) def as_dict(self, db_model): valid_keys = inspect.getargspec(db_model.__init__)[0] if 'self' in valid_keys: valid_keys.remove('self') return self.as_dict_from_keys(valid_keys) def as_dict_from_keys(self, keys): return dict((k, getattr(self, k)) for k in keys if hasattr(self, k) and getattr(self, k) != wsme.Unset) class Link(Base): """A link representation.""" href = wtypes.text "The url of a link" rel = wtypes.text "The name of a link" @classmethod def sample(cls): return cls(href=('http://localhost:8777/v2/meters/volume?' 'q.field=resource_id&' 'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), rel='volume' ) class Query(Base): """Query filter.""" # The data types supported by the query. _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] # Functions to convert the data field to the correct type. 
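# For example, assuming the converters registered just below:
#   type='integer',  value='5'                   -> 5
#   type='boolean',  value='True'                -> True
#   type='datetime', value='2015-01-01T12:00:00' -> datetime.datetime(2015, 1, 1, 12, 0)
# An unsupported type raises TypeError inside _get_value_as_type(), which is
# reported to the client as a ClientSideError listing _supported_types.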
_type_converters = {'integer': int, 'float': float, 'boolean': functools.partial( strutils.bool_from_string, strict=True), 'string': six.text_type, 'datetime': timeutils.parse_isotime} _op = None # provide a default def get_op(self): return self._op or 'eq' def set_op(self, value): self._op = value field = wsme.wsattr(wtypes.text, mandatory=True) "The name of the field to test" # op = wsme.wsattr(operation_kind, default='eq') # this ^ doesn't seem to work. op = wsme.wsproperty(operation_kind_enum, get_op, set_op) "The comparison operator. Defaults to 'eq'." value = wsme.wsattr(wtypes.text, mandatory=True) "The value to compare against the stored data" type = wtypes.text "The data type of value to compare against the stored data" def __repr__(self): # for logging calls return '<Query %r %s %r %s>' % (self.field, self.op, self.value, self.type) @classmethod def sample(cls): return cls(field='resource_id', op='eq', value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', type='string' ) def as_dict(self): return self.as_dict_from_keys(['field', 'op', 'type', 'value']) def _get_value_as_type(self, forced_type=None): """Convert metadata value to the specified data type. This method is called during metadata query to help convert the querying metadata to the data type specified by user. If there is no data type given, the metadata will be parsed by ast.literal_eval to attempt a smart conversion. NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised from wsmeext/sphinxext.py. It's OK to call it outside the Query class. Because the "public" side of that class is actually the outside of the API, and the "private" side is the API implementation. The method is only used in the API implementation, so it's OK. :returns: metadata value converted with the specified data type. """ type = forced_type or self.type try: converted_value = self.value if not type: try: converted_value = ast.literal_eval(self.value) except (ValueError, SyntaxError): # Unable to convert the metadata value automatically # let it default to self.value pass else: if type not in self._supported_types: # Types must be explicitly declared so the # correct type converter may be used. Subclasses # of Query may define _supported_types and # _type_converters to define their own types. raise TypeError() converted_value = self._type_converters[type](self.value) if isinstance(converted_value, datetime.datetime): converted_value = timeutils.normalize_time(converted_value) except ValueError: msg = (_('Unable to convert the value %(value)s' ' to the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) except TypeError: msg = (_('The data type %(type)s is not supported. The supported' ' data type list is: %(supported)s') % {'type': type, 'supported': self._supported_types}) raise ClientSideError(msg) except Exception: msg = (_('Unexpected exception converting %(value)s to' ' the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) return converted_value class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' @staticmethod def validate(value): # check that value can be serialised json.dumps(value) return value ceilometer-6.0.0/ceilometer/api/controllers/v2/samples.py0000664000567000056710000001104112701406223024616 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. 
All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from ceilometer.api.controllers.v2 import base from ceilometer.api.controllers.v2 import utils from ceilometer.api import rbac from ceilometer.i18n import _ from ceilometer import sample from ceilometer import storage class Sample(base.Base): """One measurement.""" id = wtypes.text "The unique identifier for the sample." meter = wtypes.text "The meter name this sample is for." type = wtypes.Enum(str, *sample.TYPES) "The meter type (see :ref:`meter_types`)" unit = wtypes.text "The unit of measure." volume = float "The metered value." user_id = wtypes.text "The user this sample was taken for." project_id = wtypes.text "The project this sample was taken for." resource_id = wtypes.text "The :class:`Resource` this sample was taken for." source = wtypes.text "The source that identifies where the sample comes from." timestamp = datetime.datetime "When the sample has been generated." recorded_at = datetime.datetime "When the sample has been recorded." metadata = {wtypes.text: wtypes.text} "Arbitrary metadata associated with the sample." @classmethod def from_db_model(cls, m): return cls(id=m.message_id, meter=m.counter_name, type=m.counter_type, unit=m.counter_unit, volume=m.counter_volume, user_id=m.user_id, project_id=m.project_id, resource_id=m.resource_id, source=m.source, timestamp=m.timestamp, recorded_at=m.recorded_at, metadata=utils.flatten_metadata(m.resource_metadata)) @classmethod def sample(cls): return cls(id=str(uuid.uuid1()), meter='instance', type='gauge', unit='instance', volume=1, resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', project_id='35b17138-b364-4e6a-a131-8f3099c5be68', user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', timestamp=timeutils.utcnow(), recorded_at=datetime.datetime.utcnow(), source='openstack', metadata={'name1': 'value1', 'name2': 'value2'}, ) class SamplesController(rest.RestController): """Controller managing the samples.""" @wsme_pecan.wsexpose([Sample], [base.Query], int) def get_all(self, q=None, limit=None): """Return all known samples, based on the data recorded so far. :param q: Filter rules for the samples to be returned. :param limit: Maximum number of samples to be returned. """ rbac.enforce('get_samples', pecan.request) q = q or [] limit = utils.enforce_limit(limit) kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) f = storage.SampleFilter(**kwargs) return map(Sample.from_db_model, pecan.request.storage_conn.get_samples(f, limit=limit)) @wsme_pecan.wsexpose(Sample, wtypes.text) def get_one(self, sample_id): """Return a sample. :param sample_id: the id of the sample. 
""" rbac.enforce('get_sample', pecan.request) f = storage.SampleFilter(message_id=sample_id) samples = list(pecan.request.storage_conn.get_samples(f)) if len(samples) < 1: raise base.EntityNotFound(_('Sample'), sample_id) return Sample.from_db_model(samples[0]) ceilometer-6.0.0/ceilometer/api/middleware.py0000664000567000056710000001231612701406223022376 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ import json from lxml import etree from oslo_log import log import six import webob from ceilometer import i18n from ceilometer.i18n import _ LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" @staticmethod def best_match_language(accept_language): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not accept_language: return None all_languages = i18n.get_available_languages() return accept_language.best_match(all_languages) def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. 
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): content_type = 'application/xml' try: # simple check xml is valid fault = etree.fromstring(b'\n'.join(app_iter)) # Add the translated error to the xml data if error is not None: for fault_string in fault.findall('faultstring'): fault_string.text = i18n.translate(error, user_locale) error_message = etree.tostring(fault) body = b''.join((b'<error_message>', error_message, b'</error_message>')) except etree.XMLSyntaxError as err: LOG.error(_('Error parsing HTTP response: %s'), err) error_message = state['status_code'] body = '<error_message>%s</error_message>' % error_message if six.PY3: body = body.encode('utf-8') else: content_type = 'application/json' app_data = b'\n'.join(app_iter) if six.PY3: app_data = app_data.decode('utf-8') try: fault = json.loads(app_data) if error is not None and 'faultstring' in fault: fault['faultstring'] = i18n.translate(error, user_locale) except ValueError as err: fault = app_data body = json.dumps({'error_message': fault}) if six.PY3: body = body.encode('utf-8') state['headers'].append(('Content-Length', str(len(body)))) state['headers'].append(('Content-Type', content_type)) body = [body] else: body = app_iter return body ceilometer-6.0.0/ceilometer/api/app.py0000664000567000056710000000726412701406223021047 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from werkzeug import serving from ceilometer.api import hooks from ceilometer.api import middleware from ceilometer.i18n import _LI, _LW LOG = log.getLogger(__name__) CONF = cfg.CONF OPTS = [ cfg.StrOpt('api_paste_config', default="api_paste.ini", help="Configuration file for WSGI definition of API." ), ] API_OPTS = [ cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), cfg.IntOpt('default_api_return_limit', min=1, default=100, help='Default maximum number of items returned by API request.' 
), ] CONF.register_opts(OPTS) CONF.register_opts(API_OPTS, group='api') def setup_app(pecan_config=None): # FIXME: Replace DBHook with a hooks.TransactionHook app_hooks = [hooks.ConfigHook(), hooks.DBHook(), hooks.NotifierHook(), hooks.TranslationHook()] pecan_config = pecan_config or { "app": { 'root': 'ceilometer.api.controllers.root.RootController', 'modules': ['ceilometer.api'], } } pecan.configuration.set_config(dict(pecan_config), overwrite=True) # NOTE(sileht): pecan debug won't work in multi-process environment pecan_debug = CONF.api.pecan_debug if CONF.api.workers and CONF.api.workers != 1 and pecan_debug: pecan_debug = False LOG.warning(_LW('pecan_debug cannot be enabled when workers is > 1; ' 'the value is overridden with False')) app = pecan.make_app( pecan_config['app']['root'], debug=pecan_debug, hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False ) return app def load_app(): # Build the WSGI app cfg_file = None cfg_path = cfg.CONF.api_paste_config if not os.path.isabs(cfg_path): cfg_file = CONF.find_file(cfg_path) elif os.path.exists(cfg_path): cfg_file = cfg_path if not cfg_file: raise cfg.ConfigFilesNotFoundError([cfg.CONF.api_paste_config]) LOG.info("Full WSGI config used: %s" % cfg_file) return deploy.loadapp("config:" + cfg_file) def build_server(): app = load_app() # Create the WSGI server and start it host, port = cfg.CONF.api.host, cfg.CONF.api.port LOG.info(_LI('Starting server in PID %s') % os.getpid()) LOG.info(_LI("Configuration:")) cfg.CONF.log_opt_values(LOG, logging.INFO) if host == '0.0.0.0': LOG.info(_LI( 'serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s') % ({'sport': port, 'vport': port})) else: LOG.info(_LI("serving on http://%(host)s:%(port)s") % ( {'host': host, 'port': port})) serving.run_simple(cfg.CONF.api.host, cfg.CONF.api.port, app, processes=CONF.api.workers) def app_factory(global_config, **local_conf): return setup_app() ceilometer-6.0.0/ceilometer/api/hooks.py0000664000567000056710000000617712701406223021414 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import oslo_messaging from pecan import hooks from ceilometer.i18n import _LE from ceilometer import messaging from ceilometer import storage LOG = log.getLogger(__name__) cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', group='publisher_notifier') class ConfigHook(hooks.PecanHook): """Attach the configuration object to the request. That allows controllers to get it. """ @staticmethod def before(state): state.request.cfg = cfg.CONF class DBHook(hooks.PecanHook): def __init__(self): self.storage_connection = DBHook.get_connection('metering') self.event_storage_connection = DBHook.get_connection('event') if (not self.storage_connection and not self.event_storage_connection): raise Exception("Api failed to start. 
Failed to connect to " "databases, purpose: %s" % ', '.join(['metering', 'event'])) def before(self, state): state.request.storage_conn = self.storage_connection state.request.event_storage_conn = self.event_storage_connection @staticmethod def get_connection(purpose): try: return storage.get_connection_from_config(cfg.CONF, purpose) except Exception as err: params = {"purpose": purpose, "err": err} LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " "retry later: %(err)s") % params) class NotifierHook(hooks.PecanHook): """Create and attach a notifier to the request. Usually, samples will be push to notification bus by notifier when they are posted via /v2/meters/ API. """ def __init__(self): transport = messaging.get_transport() self.notifier = oslo_messaging.Notifier( transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, publisher_id="ceilometer.api") def before(self, state): state.request.notifier = self.notifier class TranslationHook(hooks.PecanHook): def after(self, state): # After a request has been done, we need to see if # ClientSideError has added an error onto the response. # If it has we need to get it info the thread-safe WSGI # environ to be used by the ParsableErrorMiddleware. if hasattr(state.response, 'translatable_error'): state.request.environ['translatable_error'] = ( state.response.translatable_error) ceilometer-6.0.0/ceilometer/api/rbac.py0000664000567000056710000000637312701406223021176 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2014 Hewlett-Packard Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACL's) control access the API server.""" from oslo_config import cfg from oslo_policy import policy import pecan _ENFORCER = None CONF = cfg.CONF def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def _has_rule(name): return name in _ENFORCER.rules.keys() def enforce(policy_name, request): """Return the user and project the request should be limited to. :param request: HTTP request :param policy_name: the policy name to validate authz against. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) _ENFORCER.load_rules() rule_method = "telemetry:" + policy_name headers = request.headers policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by allowing the action if # there is no rule defined for it if ((_has_rule('default') or _has_rule(rule_method)) and not _ENFORCER.enforce(rule_method, {}, policy_dict)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') # TODO(fabiog): these methods are still used because the scoping part is really # convoluted and difficult to separate out. def get_limited_to(headers): """Return the user and project the request should be limited to. 
:param headers: HTTP headers dictionary :return: A tuple of (user, project), set to None if there's no limit on one of these. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) _ENFORCER.load_rules() policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by using context_is_admin # rule if the segregation rule (added in Kilo) is not defined rule_name = 'segregation' if _has_rule( 'segregation') else 'context_is_admin' if not _ENFORCER.enforce(rule_name, {}, policy_dict): return headers.get('X-User-Id'), headers.get('X-Project-Id') return None, None def get_limited_to_project(headers): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :return: A project, or None if there's no limit on it. """ return get_limited_to(headers)[1] ceilometer-6.0.0/ceilometer/api/app.wsgi0000664000567000056710000000164412701406223021364 0ustar jenkinsjenkins00000000000000# -*- mode: python -*- # # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Use this file for deploying the API under mod_wsgi. See http://pecan.readthedocs.org/en/latest/deployment.html for details. """ from ceilometer import service from ceilometer.api import app # Initialize the oslo configuration library and logging service.prepare_service([]) application = app.load_app() ceilometer-6.0.0/ceilometer/energy/0000775000567000056710000000000012701406364020432 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/energy/__init__.py0000664000567000056710000000000012701406223022523 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer/energy/kwapi.py0000664000567000056710000001034712701406223022116 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
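# The Kwapi /probes/ endpoint consumed below returns a JSON object keyed by
# probe id; the exact fields depend on the Kwapi deployment. An illustrative
# (assumed) response covering the fields the pollsters in this module read
# ('kwh', 'w', 'timestamp') would be:
#
#   {"probes": {"probe-1": {"kwh": 0.42, "w": 12.5, "timestamp": 1420070400}}}
#
# KwapiClient.iter_probes() folds each key back into its dict as
# probe_dict['id'], which the pollsters then use as the sample resource_id.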
import datetime from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log import requests import six from ceilometer.agent import plugin_base from ceilometer import keystone_client from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('kwapi', default='energy', help='Kwapi service type.'), ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') class KwapiClient(object): """Kwapi API client.""" def __init__(self, url, token=None): """Initializes client.""" self.url = url self.token = token def iter_probes(self): """Returns a list of dicts describing all probes.""" probes_url = self.url + '/probes/' headers = {} if self.token is not None: headers = {'X-Auth-Token': self.token} timeout = cfg.CONF.http_timeout request = requests.get(probes_url, headers=headers, timeout=timeout) message = request.json() probes = message['probes'] for key, value in six.iteritems(probes): probe_dict = value probe_dict['id'] = key yield probe_dict class _Base(plugin_base.PollsterBase): """Base class for the Kwapi pollster, derived from PollsterBase.""" @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.kwapi @staticmethod def get_kwapi_client(ksclient, endpoint): """Returns a KwapiClient configured with the proper url and token.""" return KwapiClient(endpoint, keystone_client.get_auth_token(ksclient)) CACHE_KEY_PROBE = 'kwapi.probes' def _iter_probes(self, ksclient, cache, endpoint): """Iterate over all probes.""" key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE) if key not in cache: cache[key] = self._get_probes(ksclient, endpoint) return iter(cache[key]) def _get_probes(self, ksclient, endpoint): try: client = self.get_kwapi_client(ksclient, endpoint) except exceptions.EndpointNotFound: LOG.debug("Kwapi endpoint not found") return [] return list(client.iter_probes()) class EnergyPollster(_Base): """Measures energy consumption.""" def get_samples(self, manager, cache, resources): """Returns all samples.""" for endpoint in resources: for probe in self._iter_probes(manager.keystone, cache, endpoint): yield sample.Sample( name='energy', type=sample.TYPE_CUMULATIVE, unit='kWh', volume=probe['kwh'], user_id=None, project_id=None, resource_id=probe['id'], timestamp=datetime.datetime.fromtimestamp( probe['timestamp']).isoformat(), resource_metadata={} ) class PowerPollster(_Base): """Measures power consumption.""" def get_samples(self, manager, cache, resources): """Returns all samples.""" for endpoint in resources: for probe in self._iter_probes(manager.keystone, cache, endpoint): yield sample.Sample( name='power', type=sample.TYPE_GAUGE, unit='W', volume=probe['w'], user_id=None, project_id=None, resource_id=probe['id'], timestamp=datetime.datetime.fromtimestamp( probe['timestamp']).isoformat(), resource_metadata={} ) ceilometer-6.0.0/ceilometer/nova_client.py0000664000567000056710000001271512701406224022015 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
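# Illustrative (assumed) calling pattern for the Client wrapper defined
# below; it is not part of the original module. The compute agent typically
# asks for the instances on its host that changed since the last poll, and
# the per-call flavor/image caches in _with_flavor_and_image() ensure each
# distinct flavor or image is fetched from Nova at most once per listing:
#
#     client = Client()
#     instances = client.instance_get_all_by_host(
#         'compute-1', since='2016-01-01T00:00:00Z')
#     for inst in instances:
#         print(inst.flavor['vcpus'], inst.image and inst.image['name'])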
import functools import logging import novaclient from novaclient import client as nova_client from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client OPTS = [ cfg.BoolOpt('nova_http_log_debug', default=False, # Added in Mitaka deprecated_for_removal=True, help=('Allow novaclient\'s debug log output. ' '(Use default_log_levels instead)')), ] SERVICE_OPTS = [ cfg.StrOpt('nova', default='compute', help='Nova service type.'), ] cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') cfg.CONF.import_opt('http_timeout', 'ceilometer.service') cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: LOG.exception(e) raise return with_logging class Client(object): """A client which gets information via python-novaclient.""" def __init__(self, endpoint_override=None, auth=None): """Initialize a nova client object.""" conf = cfg.CONF.service_credentials logger = None if cfg.CONF.nova_http_log_debug: logger = logging.getLogger("novaclient-debug") logger.setLevel(log.DEBUG) self.nova_client = nova_client.Client( version=2, session=keystone_client.get_session(), # nova adapter options region_name=conf.region_name, interface=conf.interface, service_type=cfg.CONF.service_types.nova, # keystone adapter options endpoint_override=endpoint_override, auth=auth, logger=logger) def _with_flavor_and_image(self, instances): flavor_cache = {} image_cache = {} for instance in instances: self._with_flavor(instance, flavor_cache) self._with_image(instance, image_cache) return instances def _with_flavor(self, instance, cache): fid = instance.flavor['id'] if fid in cache: flavor = cache.get(fid) else: try: flavor = self.nova_client.flavors.get(fid) except novaclient.exceptions.NotFound: flavor = None cache[fid] = flavor attr_defaults = [('name', 'unknown-id-%s' % fid), ('vcpus', 0), ('ram', 0), ('disk', 0), ('ephemeral', 0)] for attr, default in attr_defaults: if not flavor: instance.flavor[attr] = default continue instance.flavor[attr] = getattr(flavor, attr, default) def _with_image(self, instance, cache): try: iid = instance.image['id'] except TypeError: instance.image = None instance.kernel_id = None instance.ramdisk_id = None return if iid in cache: image = cache.get(iid) else: try: image = self.nova_client.images.get(iid) except novaclient.exceptions.NotFound: image = None cache[iid] = image attr_defaults = [('kernel_id', None), ('ramdisk_id', None)] instance.image['name'] = ( getattr(image, 'name') if image else 'unknown-id-%s' % iid) image_metadata = getattr(image, 'metadata', None) for attr, default in attr_defaults: ameta = image_metadata.get(attr) if image_metadata else default setattr(instance, attr, ameta) @logged def instance_get_all_by_host(self, hostname, since=None): """Returns list of instances on particular host. If since is supplied, it will return the instances changed since that datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'host': hostname, 'all_tenants': True} if since: search_opts['changes-since'] = since return self._with_flavor_and_image(self.nova_client.servers.list( detailed=True, search_opts=search_opts)) @logged def instance_get_all(self, since=None): """Returns list of all instances. If since is supplied, it will return the instances changed since that datetime. 
since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'all_tenants': True} if since: search_opts['changes-since'] = since return self.nova_client.servers.list( detailed=True, search_opts=search_opts) @logged def floating_ip_get_all(self): """Returns all floating ips.""" return self.nova_client.floating_ips.list() ceilometer-6.0.0/ceilometer/messaging.py0000664000567000056710000000730712701406223021471 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_context.context import oslo_messaging from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} def setup(): oslo_messaging.set_transport_defaults('ceilometer') def get_transport(url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = oslo_messaging.get_transport(cfg.CONF, url) except oslo_messaging.InvalidTransportURL as e: if not optional or e.url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def cleanup(): """Cleanup the oslo_messaging layer.""" global TRANSPORTS, NOTIFIERS NOTIFIERS = {} # iterate over a copy of the keys, since entries are deleted while looping for url in list(TRANSPORTS): TRANSPORTS[url].cleanup() del TRANSPORTS[url] class RequestContextSerializer(oslo_messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return oslo_context.context.RequestContext.from_dict(context) _SERIALIZER = RequestContextSerializer( oslo_serializer.JsonPayloadSerializer()) def get_batch_notification_listener(transport, targets, endpoints, allow_requeue=False, batch_size=1, batch_timeout=None): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_batch_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue, batch_size=batch_size, batch_timeout=batch_timeout) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) def convert_to_old_notification_format(priority, notification): # FIXME(sileht): temporarily convert notification to old format # to focus on oslo_messaging migration before refactoring the code to # use the new oslo_messaging facilities notification = notification.copy() notification['priority'] = priority 
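# lift the 'metadata' keys (message_id, timestamp) to the top level and
# rewrite each 'ctxt' key as '_context_<key>', matching the
# pre-oslo.messaging notification layout the handlers still expect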
notification.update(notification["metadata"]) for k in notification['ctxt']: notification['_context_' + k] = notification['ctxt'][k] del notification['ctxt'] del notification['metadata'] return notification ceilometer-6.0.0/pylintrc0000664000567000056710000000304212701406223016571 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. ignore=openstack [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. # W0703: Catch "Exception". disable=C0111,W0511,W0142,W0622,W0703 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Type attributes names can be 2 to 31 characters long, with lowercase and underscores attr-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$ # Module names matching sahara-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(sahara-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [TYPECHECK] generated-members=query,node_template,status_code,data ceilometer-6.0.0/.testr.conf0000664000567000056710000000072612701406223017076 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./ceilometer/tests} -t . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list # NOTE(chdent): Only used/matches on gabbi-related tests. 
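# Tests whose ids share the first match group below are scheduled into the
# same test runner, so each gabbi YAML scenario (an ordered sequence of HTTP
# requests) runs together instead of being split across parallel workers.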
group_regex=(gabbi\.driver\.test_gabbi_(?:prefix_|)[^_]+)_ ceilometer-6.0.0/ceilometer.egg-info/0000775000567000056710000000000012701406364020633 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/ceilometer.egg-info/requires.txt0000664000567000056710000000164112701406363023234 0ustar jenkinsjenkins00000000000000retrying!=1.3.0,>=1.2.3 jsonpath-rw-ext>=0.1.9 jsonschema!=2.5.0,<3.0.0,>=2.0.0 kafka-python<1.0.0,>=0.9.5 keystonemiddleware!=4.1.0,>=4.0.0 lxml>=2.3 msgpack-python>=0.4.0 oslo.context>=0.2.0 oslo.db>=4.1.0 oslo.concurrency>=3.5.0 oslo.config>=3.7.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.policy>=0.5.0 oslo.reports>=0.6.0 oslo.rootwrap>=2.0.0 oslo.service>=1.0.0 PasteDeploy>=1.5.0 pbr>=1.6 pecan>=1.0.0 oslo.messaging>=4.0.0 oslo.middleware>=3.0.0 oslo.serialization>=1.10.0 oslo.utils>=3.5.0 pysnmp<5.0.0,>=4.2.3 python-ceilometerclient>=2.2.1 python-glanceclient>=2.0.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 keystoneauth1>=2.1.0 python-neutronclient!=4.1.0,>=2.6.0 python-novaclient!=2.33.0,>=2.29.0 python-swiftclient>=2.2.0 PyYAML>=3.1.0 requests!=2.9.0,>=2.8.1 six>=1.9.0 SQLAlchemy<1.1.0,>=1.0.10 sqlalchemy-migrate>=0.9.6 stevedore>=1.5.0 tooz>=1.28.0 Werkzeug>=0.7 WebOb>=1.2.3 WSME>=0.8 python-dateutil>=2.4.2 ceilometer-6.0.0/ceilometer.egg-info/dependency_links.txt0000664000567000056710000000000112701406363024700 0ustar jenkinsjenkins00000000000000 ceilometer-6.0.0/ceilometer.egg-info/not-zip-safe0000664000567000056710000000000112701406355023061 0ustar jenkinsjenkins00000000000000 ceilometer-6.0.0/ceilometer.egg-info/SOURCES.txt0000664000567000056710000006427212701406364022532 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg functions.sh pylintrc requirements.txt run-functional-tests.sh setup.cfg setup.py test-requirements.txt tox.ini ceilometer/__init__.py ceilometer/collector.py ceilometer/coordination.py ceilometer/declarative.py ceilometer/exchange_control.py ceilometer/i18n.py ceilometer/keystone_client.py ceilometer/messaging.py ceilometer/middleware.py ceilometer/neutron_client.py ceilometer/notification.py ceilometer/nova_client.py ceilometer/opts.py ceilometer/pipeline.py ceilometer/sample.py ceilometer/service.py ceilometer/service_base.py ceilometer/utils.py ceilometer/version.py ceilometer.egg-info/PKG-INFO ceilometer.egg-info/SOURCES.txt ceilometer.egg-info/dependency_links.txt ceilometer.egg-info/entry_points.txt ceilometer.egg-info/not-zip-safe ceilometer.egg-info/pbr.json ceilometer.egg-info/requires.txt ceilometer.egg-info/top_level.txt ceilometer/agent/__init__.py ceilometer/agent/manager.py ceilometer/agent/plugin_base.py ceilometer/agent/discovery/__init__.py ceilometer/agent/discovery/endpoint.py ceilometer/agent/discovery/localnode.py ceilometer/agent/discovery/tenant.py ceilometer/api/__init__.py ceilometer/api/app.py ceilometer/api/app.wsgi ceilometer/api/hooks.py ceilometer/api/middleware.py ceilometer/api/rbac.py ceilometer/api/controllers/__init__.py ceilometer/api/controllers/root.py ceilometer/api/controllers/v2/__init__.py ceilometer/api/controllers/v2/base.py ceilometer/api/controllers/v2/capabilities.py ceilometer/api/controllers/v2/events.py ceilometer/api/controllers/v2/meters.py ceilometer/api/controllers/v2/query.py ceilometer/api/controllers/v2/resources.py ceilometer/api/controllers/v2/root.py ceilometer/api/controllers/v2/samples.py ceilometer/api/controllers/v2/utils.py ceilometer/cmd/__init__.py 
ceilometer/cmd/agent_notification.py ceilometer/cmd/api.py ceilometer/cmd/collector.py ceilometer/cmd/polling.py ceilometer/cmd/sample.py ceilometer/cmd/storage.py ceilometer/compute/__init__.py ceilometer/compute/discovery.py ceilometer/compute/util.py ceilometer/compute/notifications/__init__.py ceilometer/compute/notifications/instance.py ceilometer/compute/pollsters/__init__.py ceilometer/compute/pollsters/cpu.py ceilometer/compute/pollsters/disk.py ceilometer/compute/pollsters/instance.py ceilometer/compute/pollsters/memory.py ceilometer/compute/pollsters/net.py ceilometer/compute/pollsters/util.py ceilometer/compute/virt/__init__.py ceilometer/compute/virt/inspector.py ceilometer/compute/virt/hyperv/__init__.py ceilometer/compute/virt/hyperv/inspector.py ceilometer/compute/virt/libvirt/__init__.py ceilometer/compute/virt/libvirt/inspector.py ceilometer/compute/virt/vmware/__init__.py ceilometer/compute/virt/vmware/inspector.py ceilometer/compute/virt/vmware/vsphere_operations.py ceilometer/compute/virt/xenapi/__init__.py ceilometer/compute/virt/xenapi/inspector.py ceilometer/conf/__init__.py ceilometer/conf/defaults.py ceilometer/dispatcher/__init__.py ceilometer/dispatcher/database.py ceilometer/dispatcher/file.py ceilometer/dispatcher/gnocchi.py ceilometer/dispatcher/http.py ceilometer/energy/__init__.py ceilometer/energy/kwapi.py ceilometer/event/__init__.py ceilometer/event/converter.py ceilometer/event/endpoint.py ceilometer/event/trait_plugins.py ceilometer/event/storage/__init__.py ceilometer/event/storage/base.py ceilometer/event/storage/impl_db2.py ceilometer/event/storage/impl_elasticsearch.py ceilometer/event/storage/impl_hbase.py ceilometer/event/storage/impl_log.py ceilometer/event/storage/impl_mongodb.py ceilometer/event/storage/impl_sqlalchemy.py ceilometer/event/storage/models.py ceilometer/event/storage/pymongo_base.py ceilometer/hacking/__init__.py ceilometer/hacking/checks.py ceilometer/hardware/__init__.py ceilometer/hardware/discovery.py ceilometer/hardware/inspector/__init__.py ceilometer/hardware/inspector/base.py ceilometer/hardware/inspector/snmp.py ceilometer/hardware/pollsters/__init__.py ceilometer/hardware/pollsters/generic.py ceilometer/hardware/pollsters/util.py ceilometer/hardware/pollsters/data/snmp.yaml ceilometer/image/__init__.py ceilometer/image/glance.py ceilometer/ipmi/__init__.py ceilometer/ipmi/notifications/__init__.py ceilometer/ipmi/notifications/ironic.py ceilometer/ipmi/platform/__init__.py ceilometer/ipmi/platform/exception.py ceilometer/ipmi/platform/intel_node_manager.py ceilometer/ipmi/platform/ipmi_sensor.py ceilometer/ipmi/platform/ipmitool.py ceilometer/ipmi/pollsters/__init__.py ceilometer/ipmi/pollsters/node.py ceilometer/ipmi/pollsters/sensor.py ceilometer/locale/ceilometer-log-error.pot ceilometer/locale/ceilometer-log-info.pot ceilometer/locale/ceilometer-log-warning.pot ceilometer/locale/ceilometer.pot ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po ceilometer/locale/de/LC_MESSAGES/ceilometer.po ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/es/LC_MESSAGES/ceilometer.po ceilometer/locale/fr/LC_MESSAGES/ceilometer.po ceilometer/locale/it/LC_MESSAGES/ceilometer.po ceilometer/locale/ja/LC_MESSAGES/ceilometer.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po 
ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po ceilometer/locale/ru/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po ceilometer/meter/__init__.py ceilometer/meter/notifications.py ceilometer/meter/data/meters.yaml ceilometer/network/__init__.py ceilometer/network/floatingip.py ceilometer/network/notifications.py ceilometer/network/services/__init__.py ceilometer/network/services/base.py ceilometer/network/services/discovery.py ceilometer/network/services/fwaas.py ceilometer/network/services/lbaas.py ceilometer/network/services/vpnaas.py ceilometer/network/statistics/__init__.py ceilometer/network/statistics/driver.py ceilometer/network/statistics/flow.py ceilometer/network/statistics/port.py ceilometer/network/statistics/switch.py ceilometer/network/statistics/table.py ceilometer/network/statistics/opencontrail/__init__.py ceilometer/network/statistics/opencontrail/client.py ceilometer/network/statistics/opencontrail/driver.py ceilometer/network/statistics/opendaylight/__init__.py ceilometer/network/statistics/opendaylight/client.py ceilometer/network/statistics/opendaylight/driver.py ceilometer/objectstore/__init__.py ceilometer/objectstore/rgw.py ceilometer/objectstore/rgw_client.py ceilometer/objectstore/swift.py ceilometer/publisher/__init__.py ceilometer/publisher/direct.py ceilometer/publisher/file.py ceilometer/publisher/kafka_broker.py ceilometer/publisher/messaging.py ceilometer/publisher/test.py ceilometer/publisher/udp.py ceilometer/publisher/utils.py ceilometer/storage/__init__.py ceilometer/storage/base.py ceilometer/storage/impl_db2.py ceilometer/storage/impl_hbase.py ceilometer/storage/impl_log.py ceilometer/storage/impl_mongodb.py ceilometer/storage/impl_sqlalchemy.py ceilometer/storage/models.py ceilometer/storage/pymongo_base.py ceilometer/storage/hbase/__init__.py ceilometer/storage/hbase/base.py ceilometer/storage/hbase/inmemory.py ceilometer/storage/hbase/migration.py ceilometer/storage/hbase/utils.py ceilometer/storage/mongo/__init__.py ceilometer/storage/mongo/utils.py ceilometer/storage/sqlalchemy/__init__.py ceilometer/storage/sqlalchemy/migration.py ceilometer/storage/sqlalchemy/models.py ceilometer/storage/sqlalchemy/utils.py ceilometer/storage/sqlalchemy/migrate_repo/README ceilometer/storage/sqlalchemy/migrate_repo/__init__.py ceilometer/storage/sqlalchemy/migrate_repo/manage.py ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py 
ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py ceilometer/telemetry/__init__.py ceilometer/telemetry/notifications.py ceilometer/tests/__init__.py ceilometer/tests/base.py ceilometer/tests/db.py ceilometer/tests/mocks.py ceilometer/tests/pipeline_base.py ceilometer/tests/functional/__init__.py ceilometer/tests/functional/test_bin.py ceilometer/tests/functional/test_collector.py ceilometer/tests/functional/test_notification.py ceilometer/tests/functional/api/__init__.py ceilometer/tests/functional/api/v2/__init__.py ceilometer/tests/functional/api/v2/test_acl_scenarios.py 
ceilometer/tests/functional/api/v2/test_api_upgrade.py ceilometer/tests/functional/api/v2/test_app.py ceilometer/tests/functional/api/v2/test_capabilities.py ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py ceilometer/tests/functional/api/v2/test_event_scenarios.py ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py ceilometer/tests/functional/api/v2/test_statistics_scenarios.py ceilometer/tests/functional/gabbi/__init__.py ceilometer/tests/functional/gabbi/fixtures.py ceilometer/tests/functional/gabbi/gabbi_paste.ini ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml ceilometer/tests/functional/gabbi/test_gabbi.py ceilometer/tests/functional/gabbi/test_gabbi_prefix.py ceilometer/tests/functional/gabbi/gabbits/api_events_no_data.yaml ceilometer/tests/functional/gabbi/gabbits/api_events_with_data.yaml ceilometer/tests/functional/gabbi/gabbits/basic.yaml ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml ceilometer/tests/functional/gabbi/gabbits/meters.yaml ceilometer/tests/functional/gabbi/gabbits/middleware.yaml ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml ceilometer/tests/functional/gabbi/gabbits/samples.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml ceilometer/tests/functional/hooks/post_test_hook.sh ceilometer/tests/functional/publisher/__init__.py ceilometer/tests/functional/publisher/test_direct.py ceilometer/tests/functional/storage/__init__.py ceilometer/tests/functional/storage/test_impl_db2.py ceilometer/tests/functional/storage/test_impl_hbase.py ceilometer/tests/functional/storage/test_impl_log.py ceilometer/tests/functional/storage/test_impl_mongodb.py ceilometer/tests/functional/storage/test_impl_sqlalchemy.py ceilometer/tests/functional/storage/test_pymongo_base.py ceilometer/tests/functional/storage/test_storage_scenarios.py ceilometer/tests/integration/__init__.py ceilometer/tests/integration/gabbi/__init__.py ceilometer/tests/integration/gabbi/test_gabbi_live.py ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json ceilometer/tests/integration/hooks/post_test_hook.sh ceilometer/tests/tempest/__init__.py ceilometer/tests/tempest/config.py ceilometer/tests/tempest/plugin.py ceilometer/tests/tempest/api/__init__.py ceilometer/tests/tempest/api/base.py ceilometer/tests/tempest/api/test_telemetry_notification_api.py ceilometer/tests/tempest/scenario/__init__.py ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py ceilometer/tests/tempest/service/__init__.py ceilometer/tests/tempest/service/client.py ceilometer/tests/unit/__init__.py ceilometer/tests/unit/test_coordination.py ceilometer/tests/unit/test_declarative.py ceilometer/tests/unit/test_decoupled_pipeline.py ceilometer/tests/unit/test_event_pipeline.py 
ceilometer/tests/unit/test_messaging.py ceilometer/tests/unit/test_middleware.py ceilometer/tests/unit/test_neutronclient.py ceilometer/tests/unit/test_neutronclient_lbaas_v2.py ceilometer/tests/unit/test_novaclient.py ceilometer/tests/unit/test_sample.py ceilometer/tests/unit/test_utils.py ceilometer/tests/unit/agent/__init__.py ceilometer/tests/unit/agent/agentbase.py ceilometer/tests/unit/agent/test_discovery.py ceilometer/tests/unit/agent/test_manager.py ceilometer/tests/unit/agent/test_plugin.py ceilometer/tests/unit/api/__init__.py ceilometer/tests/unit/api/test_app.py ceilometer/tests/unit/api/test_hooks.py ceilometer/tests/unit/api/test_versions.py ceilometer/tests/unit/api/v2/__init__.py ceilometer/tests/unit/api/v2/test_complex_query.py ceilometer/tests/unit/api/v2/test_query.py ceilometer/tests/unit/api/v2/test_statistics.py ceilometer/tests/unit/api/v2/test_wsme_custom_type.py ceilometer/tests/unit/compute/__init__.py ceilometer/tests/unit/compute/test_discovery.py ceilometer/tests/unit/compute/notifications/__init__.py ceilometer/tests/unit/compute/notifications/test_instance.py ceilometer/tests/unit/compute/pollsters/__init__.py ceilometer/tests/unit/compute/pollsters/base.py ceilometer/tests/unit/compute/pollsters/test_cpu.py ceilometer/tests/unit/compute/pollsters/test_diskio.py ceilometer/tests/unit/compute/pollsters/test_instance.py ceilometer/tests/unit/compute/pollsters/test_location_metadata.py ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer/tests/unit/compute/pollsters/test_net.py ceilometer/tests/unit/compute/virt/__init__.py ceilometer/tests/unit/compute/virt/hyperv/__init__.py ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py ceilometer/tests/unit/compute/virt/libvirt/__init__.py ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/__init__.py ceilometer/tests/unit/compute/virt/vmware/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py ceilometer/tests/unit/compute/virt/xenapi/__init__.py ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py ceilometer/tests/unit/dispatcher/__init__.py ceilometer/tests/unit/dispatcher/test_db.py ceilometer/tests/unit/dispatcher/test_dispatcher.py ceilometer/tests/unit/dispatcher/test_file.py ceilometer/tests/unit/dispatcher/test_gnocchi.py ceilometer/tests/unit/dispatcher/test_http.py ceilometer/tests/unit/energy/__init__.py ceilometer/tests/unit/energy/test_kwapi.py ceilometer/tests/unit/event/__init__.py ceilometer/tests/unit/event/test_converter.py ceilometer/tests/unit/event/test_endpoint.py ceilometer/tests/unit/event/test_trait_plugins.py ceilometer/tests/unit/hardware/__init__.py ceilometer/tests/unit/hardware/inspector/__init__.py ceilometer/tests/unit/hardware/inspector/test_inspector.py ceilometer/tests/unit/hardware/inspector/test_snmp.py ceilometer/tests/unit/hardware/pollsters/__init__.py ceilometer/tests/unit/hardware/pollsters/test_generic.py ceilometer/tests/unit/hardware/pollsters/test_util.py ceilometer/tests/unit/image/__init__.py ceilometer/tests/unit/image/test_glance.py ceilometer/tests/unit/ipmi/__init__.py ceilometer/tests/unit/ipmi/notifications/__init__.py ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py ceilometer/tests/unit/ipmi/notifications/test_ironic.py ceilometer/tests/unit/ipmi/platform/__init__.py ceilometer/tests/unit/ipmi/platform/fake_utils.py ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py 
ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py ceilometer/tests/unit/ipmi/pollsters/__init__.py ceilometer/tests/unit/ipmi/pollsters/base.py ceilometer/tests/unit/ipmi/pollsters/test_node.py ceilometer/tests/unit/ipmi/pollsters/test_sensor.py ceilometer/tests/unit/meter/__init__.py ceilometer/tests/unit/meter/test_meter_plugins.py ceilometer/tests/unit/meter/test_notifications.py ceilometer/tests/unit/network/__init__.py ceilometer/tests/unit/network/test_floating_ip.py ceilometer/tests/unit/network/test_notifications.py ceilometer/tests/unit/network/services/__init__.py ceilometer/tests/unit/network/services/test_fwaas.py ceilometer/tests/unit/network/services/test_lbaas.py ceilometer/tests/unit/network/services/test_lbaas_v2.py ceilometer/tests/unit/network/services/test_vpnaas.py ceilometer/tests/unit/network/statistics/__init__.py ceilometer/tests/unit/network/statistics/test_driver.py ceilometer/tests/unit/network/statistics/test_flow.py ceilometer/tests/unit/network/statistics/test_port.py ceilometer/tests/unit/network/statistics/test_statistics.py ceilometer/tests/unit/network/statistics/test_switch.py ceilometer/tests/unit/network/statistics/test_table.py ceilometer/tests/unit/network/statistics/opencontrail/__init__.py ceilometer/tests/unit/network/statistics/opencontrail/test_client.py ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py ceilometer/tests/unit/network/statistics/opendaylight/__init__.py ceilometer/tests/unit/network/statistics/opendaylight/test_client.py ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py ceilometer/tests/unit/objectstore/__init__.py ceilometer/tests/unit/objectstore/test_rgw.py ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer/tests/unit/objectstore/test_swift.py ceilometer/tests/unit/publisher/__init__.py ceilometer/tests/unit/publisher/test_file.py ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py ceilometer/tests/unit/publisher/test_messaging_publisher.py ceilometer/tests/unit/publisher/test_udp.py ceilometer/tests/unit/publisher/test_utils.py ceilometer/tests/unit/storage/__init__.py ceilometer/tests/unit/storage/test_base.py ceilometer/tests/unit/storage/test_get_connection.py ceilometer/tests/unit/storage/test_models.py ceilometer/tests/unit/storage/sqlalchemy/__init__.py ceilometer/tests/unit/storage/sqlalchemy/test_models.py ceilometer/tests/unit/telemetry/__init__.py ceilometer/tests/unit/telemetry/test_notifications.py ceilometer/tests/unit/transformer/__init__.py ceilometer/tests/unit/transformer/test_conversions.py ceilometer/transformer/__init__.py ceilometer/transformer/accumulator.py ceilometer/transformer/arithmetic.py ceilometer/transformer/conversions.py devstack/README.rst devstack/apache-ceilometer.template devstack/plugin.sh devstack/settings devstack/files/rpms/ceilometer devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/Makefile doc/source/1-agents.png doc/source/2-1-collection-notification.png doc/source/2-2-collection-poll.png doc/source/2-accessmodel.png doc/source/3-Pipeline.png doc/source/4-Transformer.png doc/source/5-multi-publish.png doc/source/6-storagemodel.png doc/source/architecture.rst doc/source/ceilo-arch.png doc/source/ceilo-gnocchi-arch.png doc/source/conf.py doc/source/configuration.rst doc/source/contributing.rst doc/source/events.rst doc/source/format.rst doc/source/glossary.rst doc/source/gmr.rst doc/source/index.rst doc/source/measurements.rst doc/source/new_meters.rst doc/source/overview.rst 
doc/source/plugins.rst doc/source/testing.rst doc/source/_templates/.placeholder doc/source/api/index.rst doc/source/install/dbreco.rst doc/source/install/development.rst doc/source/install/index.rst doc/source/install/manual.rst doc/source/install/mod_wsgi.rst doc/source/install/upgrade.rst doc/source/releasenotes/folsom.rst doc/source/releasenotes/index.rst doc/source/webapi/index.rst doc/source/webapi/v2.rst etc/apache2/ceilometer etc/ceilometer/README-ceilometer.conf.txt etc/ceilometer/api_paste.ini etc/ceilometer/ceilometer-config-generator.conf etc/ceilometer/event_definitions.yaml etc/ceilometer/event_pipeline.yaml etc/ceilometer/gnocchi_resources.yaml etc/ceilometer/pipeline.yaml etc/ceilometer/policy.json etc/ceilometer/rootwrap.conf etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml etc/ceilometer/examples/osprofiler_event_definitions.yaml etc/ceilometer/rootwrap.d/ipmi.filters rally-jobs/README.rst rally-jobs/ceilometer.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst rally-jobs/plugins/plugin_sample.py releasenotes/notes/.placeholder releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml releasenotes/notes/always-requeue-7a2df9243987ab67.yaml releasenotes/notes/batch-messaging-d126cc525879d58e.yaml releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml releasenotes/notes/event-type-race-c295baf7f1661eab.yaml releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml releasenotes/notes/keystone-v3-fab1e257c5672965.yaml releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml releasenotes/notes/remove-eventlet-6738321434b60c78.yaml releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml releasenotes/notes/support-None-query-45abaae45f08eda4.yaml releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/__init__.py 
tools/ceilometer-test-event.py tools/lintstack.py tools/lintstack.sh tools/make_test_data.py tools/make_test_data.sh tools/make_test_event_data.py tools/pretty_tox.sh tools/send_test_data.py tools/show_data.py tools/test_hbase_table_utils.pyceilometer-6.0.0/ceilometer.egg-info/PKG-INFO0000664000567000056710000000207512701406363021733 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: ceilometer Version: 6.0.0 Summary: OpenStack Telemetry Home-page: http://docs.openstack.org/developer/ceilometer/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ceilometer ========== Release notes can be read online at: http://docs.openstack.org/developer/ceilometer/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/ceilometer/ The project home is at: http://launchpad.net/ceilometer Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Topic :: System :: Monitoring ceilometer-6.0.0/ceilometer.egg-info/top_level.txt0000664000567000056710000000001312701406363023356 0ustar jenkinsjenkins00000000000000ceilometer ceilometer-6.0.0/ceilometer.egg-info/pbr.json0000664000567000056710000000005612701406363022311 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "2444399"}ceilometer-6.0.0/ceilometer.egg-info/entry_points.txt0000664000567000056710000003565612701406363024147 0ustar jenkinsjenkins00000000000000[ceilometer.builder.poll.central] hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster [ceilometer.compute.virt] hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector [ceilometer.discover] endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery lb_members = ceilometer.network.services.discovery:LBMembersDiscovery lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery local_instances = ceilometer.compute.discovery:InstanceDiscovery local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery tenant = ceilometer.agent.discovery.tenant:TenantDiscovery tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery [ceilometer.dispatcher.event] database = ceilometer.dispatcher.database:DatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher http = ceilometer.dispatcher.http:HttpDispatcher [ceilometer.dispatcher.meter] database 
= ceilometer.dispatcher.database:DatabaseDispatcher file = ceilometer.dispatcher.file:FileDispatcher gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher http = ceilometer.dispatcher.http:HttpDispatcher [ceilometer.event.publisher] direct = ceilometer.publisher.direct:DirectPublisher kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher test = ceilometer.publisher.test:TestPublisher [ceilometer.event.storage] db2 = ceilometer.event.storage.impl_db2:Connection es = ceilometer.event.storage.impl_elasticsearch:Connection hbase = ceilometer.event.storage.impl_hbase:Connection log = ceilometer.event.storage.impl_log:Connection mongodb = ceilometer.event.storage.impl_mongodb:Connection mysql = ceilometer.event.storage.impl_sqlalchemy:Connection postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection [ceilometer.event.trait_plugin] bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin split = ceilometer.event.trait_plugins:SplitterTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin [ceilometer.hardware.inspectors] snmp = ceilometer.hardware.inspector.snmp:SNMPInspector [ceilometer.metering.storage] db2 = ceilometer.storage.impl_db2:Connection hbase = ceilometer.storage.impl_hbase:Connection log = ceilometer.storage.impl_log:Connection mongodb = ceilometer.storage.impl_mongodb:Connection mysql = ceilometer.storage.impl_sqlalchemy:Connection postgresql = ceilometer.storage.impl_sqlalchemy:Connection sqlite = ceilometer.storage.impl_sqlalchemy:Connection [ceilometer.notification] _sample = ceilometer.telemetry.notifications:TelemetryIpc floatingip = ceilometer.network.notifications:FloatingIP hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse instance = ceilometer.compute.notifications.instance:Instance instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled meter = ceilometer.meter.notifications:ProcessMeterNotifications network = ceilometer.network.notifications:Network network.services.firewall = ceilometer.network.notifications:Firewall network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy network.services.firewall.rule = ceilometer.network.notifications:FirewallRule network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor network.services.lb.member = ceilometer.network.notifications:Member network.services.lb.pool = ceilometer.network.notifications:Pool network.services.lb.vip = ceilometer.network.notifications:Vip network.services.vpn = ceilometer.network.notifications:VPNService network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy port = ceilometer.network.notifications:Port router = ceilometer.network.notifications:Router subnet = ceilometer.network.notifications:Subnet [ceilometer.poll.central] energy = ceilometer.energy.kwapi:EnergyPollster image = 
ceilometer.image.glance:ImagePollster image.size = ceilometer.image.glance:ImageSizePollster ip.floating = ceilometer.network.floatingip:FloatingIPPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster power = ceilometer.energy.kwapi:PowerPollster rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster rgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster switch = ceilometer.network.statistics.switch:SWPollster switch.flow = ceilometer.network.statistics.flow:FlowPollster switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets switch.port = ceilometer.network.statistics.port:PortPollster switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets switch.port.transmit.bytes = 
ceilometer.network.statistics.port:PortPollsterTransmitBytes switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets switch.table = ceilometer.network.statistics.table:TablePollster switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets [ceilometer.poll.compute] cpu = ceilometer.compute.pollsters.cpu:CPUPollster cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster instance = ceilometer.compute.pollsters.instance:InstancePollster memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.bytes.rate = 
ceilometer.compute.pollsters.net:OutgoingBytesRatePollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster [ceilometer.poll.ipmi] hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster [ceilometer.publisher] direct = ceilometer.publisher.direct:DirectPublisher file = ceilometer.publisher.file:FilePublisher kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher test = ceilometer.publisher.test:TestPublisher udp = ceilometer.publisher.udp:UDPPublisher [ceilometer.transformer] accumulator = ceilometer.transformer.accumulator:TransformerAccumulator aggregator = ceilometer.transformer.conversions:AggregatorTransformer arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer delta = ceilometer.transformer.conversions:DeltaTransformer rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer unit_conversion = ceilometer.transformer.conversions:ScalingTransformer [console_scripts] ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-api = ceilometer.cmd.api:main ceilometer-collector = ceilometer.cmd.collector:main ceilometer-dbsync = ceilometer.cmd.storage:dbsync ceilometer-expirer = ceilometer.cmd.storage:expirer ceilometer-polling = ceilometer.cmd.polling:main ceilometer-rootwrap = oslo_rootwrap.cmd:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample [keystoneauth1.plugin] password-ceilometer-legacy = ceilometer.keystone_client:LegacyCeilometerKeystoneLoader [network.statistics.drivers] opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver [oslo.config.opts] ceilometer = ceilometer.opts:list_opts [oslo.config.opts.defaults] ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults [tempest.test_plugins] ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin ceilometer-6.0.0/AUTHORS0000664000567000056710000002604512701406363016067 0ustar jenkinsjenkins00000000000000Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alex Holden Alexei Kornienko Ana Malagon Ananya Chatterjee Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova Artur Svechnikov Ashwin Agate Balazs Gibizer Bartosz Górski Ben Nemec Ben Nemec Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brian Moss Brooklyn Chen Béla Vancsics Can ZHANG Cedric Soulas Chad Lung Chandan Kumar ChangBo Guo(gcb)
Chaozhe.Chen ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Christian Berendt Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu Cyril Roelandt Cyril Roelandt Damian Van Vuuren Dan Florea Dan Prince Dan Travis Darren Birkett Davanum Srinivas David Peraza Dazhao Debo~ Dutta Dina Belova Dirk Mueller Divya Dong Ma Doug Hellmann Drew Thorstensen Edwin Zhai Emma Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Brown Fabio Giannetti Fei Long Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Gordon Chung Graham Binns Guangyu Suo Hang Liu Haomeng, Wang Harri Hämäläinen Hisashi Osanai Hongbin Lu Igor Degtiarov Ihar Hrachyshka Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Jake Liu James E. Blair Jason Myers Jason Zhang Jay Lau Jay Pipes Jeremy Stanley Jie Li Jim Rollenhagen Jimmy McCrory Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon JordanP JuPing Julien Danjou Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Kennan Kennan Kevin McDonald Kishore Juigil Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Lena Novokshonova Lianhao Lu LinuxJedi LiuSheng Luis A. Garcia Luo Gangyi Maho Koshiya Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Mathew Odden Mathieu Gagné Matt Riedemann Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still Michał Jastrzębski Miguel Alex Cantu Miguel Grinberg Mike Spreitzer Monsyne Dragon Monty Taylor Morgan Fainberg Nadya Privalova Nadya Shakhat Nejc Saje Nick Barcet Nicolas Barcet (nijaba) Noorul Islam K M Octavian Ciuhandu PanFengyun Patrick East Paul Belanger Peter Portante Phil Neal Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Pádraig Brady Qiaowei Ren Rabi Mishra Rafael Rivero Rich Bowen Rikimaru Honjo Rob Raymond Robert Collins Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU Saba Ahmed Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sergey Lukjanov Sergey Vilgelm Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shubham Chitranshi Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stefano Zilli Stephen Balukoff Stephen Gran Steve Lewis Steve Martinelli Steven Berler Sumant Murke Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Takashi NATSUME Tatsuro Makita Terri Yu Thierry Carrez Thomas Bechtold Thomas Herve Thomas Herve Thomas Maddox Tong Li Ubuntu Victor Stinner Victor Stinner Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Wu Wenxiang Xia Linjuan XiaBing Yao Yaguang Tang Yanyan Hu Yassine Lamgarchal Yathiraj Udupi You Yamagata Yunhong, Jiang Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhongyue Luo Zi Lian Ji ananya23d annegentle ansaba ccrouch eNovance emilienm florent fujioka yuuichi gengjh gord chung guillaume pernot hgangwx jiaxi jinxingfang jizilian joyce kairoaraujo kiwik-chenrui leizhang lianghuifei lijian liuqing liusheng lizheming lqslan lrqrun ls1175 lvdongbing lzhijun mizeng nellysmitt replay sanuptpm sh.huang shengjie min srsakhamuri tanlin terriyu unknown vagrant venkatamahesh vivek.nandavanam vivek.nandavanam xialinjuan xiangjun li xingzhou yanheven zhang-jinnan zhangguoqing zjingbj ceilometer-6.0.0/requirements.txt0000664000567000056710000000332412701406223020271 0ustar 
jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. retrying!=1.3.0,>=1.2.3 # Apache-2.0 jsonpath-rw-ext>=0.1.9 # Apache-2.0 jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT kafka-python<1.0.0,>=0.9.5 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD msgpack-python>=0.4.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.7.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 PasteDeploy>=1.5.0 # MIT pbr>=1.6 # Apache-2.0 pecan>=1.0.0 # BSD oslo.messaging>=4.0.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 pysnmp<5.0.0,>=4.2.3 # BSD python-ceilometerclient>=2.2.1 # Apache-2.0 python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 PyYAML>=3.1.0 # MIT requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 tooz>=1.28.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3 # MIT WSME>=0.8 # MIT # NOTE(jd) We do not import it directly, but WSME datetime string parsing # behaviour changes when this library is installed python-dateutil>=2.4.2 # BSD ceilometer-6.0.0/HACKING.rst0000664000567000056710000000206112701406223016600 0ustar jenkinsjenkins00000000000000Ceilometer Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Ceilometer Specific Commandments -------------------------------- - [C301] LOG.warn() is not allowed. Use LOG.warning() - [C302] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. ceilometer-6.0.0/CONTRIBUTING.rst0000664000567000056710000000106312701406223017444 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/ceilometer ceilometer-6.0.0/releasenotes/0000775000567000056710000000000012701406364017502 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/notes/0000775000567000056710000000000012701406364020632 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml0000664000567000056710000000052512701406223026326 0ustar jenkinsjenkins00000000000000--- features: - > Support resource caching in the Gnocchi dispatcher to improve write performance by avoiding additional queries. other: - > A dogpile.cache supported backend is required to enable the cache. Additional configuration `options `_ are also required. ceilometer-6.0.0/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml0000664000567000056710000000031412701406223031336 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1530793 `_] The network.services.lb.incoming.bytes meter was previously set to an incorrect type. It should be a gauge meter. ceilometer-6.0.0/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml0000664000567000056710000000052412701406223026377 0ustar jenkinsjenkins00000000000000--- features: - > Support for CORS is added. More information can be found [`here `_]. upgrade: - > The api-paste.ini file can be modified to include or exclude the CORS middleware. Additional configurations can be made to the middleware as well. ceilometer-6.0.0/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml0000664000567000056710000000100612701406223026713 0ustar jenkinsjenkins00000000000000--- features: - > Add support for batch processing of messages from the queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. upgrade: - > batch_size and batch_timeout configuration options are added to both the [notification] and [collector] sections of the configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. ceilometer-6.0.0/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml0000664000567000056710000000023612701406223030102 0ustar jenkinsjenkins00000000000000--- upgrade: - > gnocchi_resources.yaml in Ceilometer should be updated. fixes: - > Fix samples from Heat to map to the correct Gnocchi resource type. ceilometer-6.0.0/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml0000664000567000056710000000054412701406223031421 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1480333 `_] Support the ability to configure the collector to capture events or meters mutually exclusively, rather than always capturing both. other: - > Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in the configuration file. ceilometer-6.0.0/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml0000664000567000056710000000020712701406223027050 0ustar jenkinsjenkins00000000000000--- features: - > Ceilometer alarms code is now fully removed from the code base. Equivalent functionality is handled by Aodh. ceilometer-6.0.0/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml0000664000567000056710000000060012701406223031225 0ustar jenkinsjenkins00000000000000--- upgrade: - > To utilize the new policy support, the policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required.
fixes: - > [`bug 1504495 `_] Configure Ceilometer to handle policy.json rules when possible. ceilometer-6.0.0/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml0000664000567000056710000000033512701406223030460 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load. ceilometer-6.0.0/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml0000664000567000056710000000063512701406223031167 0ustar jenkinsjenkins00000000000000--- features: - > To minimise load on the Nova API, an additional configuration option was added to control the discovery interval vs. the metric polling interval. If the resource_update_interval option is configured in the compute section, the compute agent will discover new instances based on the defined interval. The agent will continue to poll the discovered instances at the interval defined by the pipeline. ceilometer-6.0.0/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml0000664000567000056710000000025212701406223032304 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1531626 `_] Ensure the aggregator transformer timeout is honoured if size is not provided. ceilometer-6.0.0/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml0000664000567000056710000000021712701406223030022 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1523124 `_] Fix the gnocchi dispatcher to support the UDP collector. ceilometer-6.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701406223023075 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml0000664000567000056710000000050612701406223031266 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to the appropriate type to handle large data. ceilometer-6.0.0/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml0000664000567000056710000000031212701406223026615 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 255569 `_] Fix caching support in the Gnocchi dispatcher. Added better locking support to enable smoother cache access. ceilometer-6.0.0/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml0000664000567000056710000000111012701406223026635 0ustar jenkinsjenkins00000000000000--- critical: - > The previous defaults for the `requeue_sample_on_dispatcher_error' and `requeue_event_on_dispatcher_error' configuration options made it very easy to lose data: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way. upgrade: - > Requeuing is now always enabled, and the `requeue_event_on_dispatcher_error' and `requeue_sample_on_dispatcher_error' options have been removed. ceilometer-6.0.0/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml0000664000567000056710000000026712701406223026622 0ustar jenkinsjenkins00000000000000--- features: - > The Gnocchi dispatcher now uses a client rather than direct HTTP requests. upgrade: - > The gnocchiclient library is now a requirement if using ceilometer+gnocchi. ceilometer-6.0.0/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml0000664000567000056710000000051012701406223027660 0ustar jenkinsjenkins00000000000000--- features: - > RPC collector support is dropped.
The queue-based notifier publisher and collector were added as the recommended alternative as of the Icehouse cycle. upgrade: - > Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. ceilometer-6.0.0/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml0000664000567000056710000000117412701406223030523 0ustar jenkinsjenkins00000000000000--- features: - > Support for polling Neutron's LBaaS v2 API was added, as the v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. issues: - > The Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generate a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. upgrade: - > By default, Ceilometer will poll the v2 API. To poll the legacy v1 API, add the neutron_lbaas_version=v1 option to the configuration file. ceilometer-6.0.0/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml0000664000567000056710000000012712701406223026551 0ustar jenkinsjenkins00000000000000--- features: - > Remove eventlet from Ceilometer in favour of a threaded approach. ceilometer-6.0.0/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml0000664000567000056710000000010312701406223026042 0ustar jenkinsjenkins00000000000000--- features: - > Add support for Keystone v3 authentication. ceilometer-6.0.0/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml0000664000567000056710000000061212701406223030343 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1536338 `_] A patch was added to fix the broken floatingip pollster that polled data from the Nova API; because the Nova API filtered the data by tenant, Ceilometer was not getting any data back. The fix changes the pollster to use the Neutron API instead to get the floating IP info. ceilometer-6.0.0/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml0000664000567000056710000000022112701406223030352 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml. ceilometer-6.0.0/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml0000664000567000056710000000054112701406223031535 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1536699 `_] Patch to fix the volume field lookup in the meter definition file. In case the field is missing in the definition, it raises a KeyError and aborts. Instead we should skip the meter with the missing field and continue with the rest of the definitions. ceilometer-6.0.0/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml0000664000567000056710000000032212701406223027710 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi. This functionality requires Gnocchi v2.1.0 to be installed. ceilometer-6.0.0/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml0000664000567000056710000000035112701406223027270 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1550436 `_] Cache JSON parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. ceilometer-6.0.0/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml0000664000567000056710000000054512701406223033452 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully.
Previously, an exception was raised as soon as a bad resource was hit and the rest were skipped. The patch now skips the bad resource and proceeds with the rest of the definitions. ceilometer-6.0.0/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml0000664000567000056710000000066412701406223030434 0ustar jenkinsjenkins00000000000000--- critical: - > [`bug 1533787 `_] Fix an issue where agents were not properly registered to their group when multiple notification agents are deployed. This can result in bad transformations, as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in the [oslo_messaging_rabbit] section when deploying multiple agents. ceilometer-6.0.0/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml0000664000567000056710000000024512701406223027075 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1254800 `_] Add better support to catch race conditions when creating event_types ceilometer-6.0.0/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml0000664000567000056710000000023712701406223027713 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1388680 `_] Support the ability to query for None values when using the SQL backend. ceilometer-6.0.0/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml0000664000567000056710000000037412701406223031363 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1539163 `_] Add the ability to define whether to use the first or last timestamp when aggregating samples. This will allow more flexibility when chaining transformers. ceilometer-6.0.0/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml0000664000567000056710000000037312701406223027165 0ustar jenkinsjenkins00000000000000--- features: - > Support for the CADF-only payload in the HTTP dispatcher is dropped, as the audit middleware in pyCADF was dropped in the Kilo cycle. upgrade: - > The audit middleware in the keystonemiddleware library should be used for similar support. ceilometer-6.0.0/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml0000664000567000056710000000034112701406223027705 0ustar jenkinsjenkins00000000000000--- critical: - > [`bug 1519767 `_] The fnmatch functionality in Python <= 2.7.9 is not thread-safe. This issue and its potential race conditions are now patched. ceilometer-6.0.0/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml0000664000567000056710000000033212701406223027565 0ustar jenkinsjenkins00000000000000--- upgrade: - > Run db-sync to add new indices. fixes: - > [`bug 1526793 `_] Additional indices were added to better support querying of event data. ceilometer-6.0.0/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml0000664000567000056710000000055312701406223030343 0ustar jenkinsjenkins00000000000000 --- fixes: - > [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurrence of a meter will be used and any following duplicates will be skipped from processing. ceilometer-6.0.0/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml0000664000567000056710000000041212701406223031177 0ustar jenkinsjenkins00000000000000--- features: - > [`bug 1506959 `_] Add support for querying the unique set of meter names rather than the meters associated with each resource. The list is available by adding the unique=True option to the request.
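As a quick illustration of the unique meter query described in the note above, the request below is a minimal sketch only: the default API port 8777, the $TOKEN variable, and the exact query-string form of the option are assumptions for illustration, not part of the release note.
# Hypothetical sketch of the unique meter-name query (bug 1506959).
# Without unique=True, /v2/meters returns one entry per meter/resource pair.
curl -s -H "X-Auth-Token: $TOKEN" \
    "http://localhost:8777/v2/meters?unique=True" | python -m json.tool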
ceilometer-6.0.0/releasenotes/source/0000775000567000056710000000000012701406364021002 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/source/index.rst0000664000567000056710000000020612701406224022634 0ustar jenkinsjenkins00000000000000========================= Ceilometer Release Notes ========================= .. toctree:: :maxdepth: 1 liberty unreleased ceilometer-6.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701406364023137 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701406223025402 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701406223023652 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ceilometer-6.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701406223023174 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ceilometer-6.0.0/releasenotes/source/conf.py0000664000567000056710000002170212701406223022275 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Ceilometer Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer Release Notes' copyright = u'2015, Ceilometer Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
from ceilometer.version import version_info as ceilometer_version # The full version, including alpha/beta/rc tags. release = ceilometer_version.version_string_with_vcs() # The short X.Y version. version = ceilometer_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. 
# html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CeilometerReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CeilometerReleaseNotes.tex', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometerreleasenotes', u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CeilometerReleaseNotes', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'CeilometerReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False ceilometer-6.0.0/releasenotes/source/_static/0000775000567000056710000000000012701406364022430 5ustar jenkinsjenkins00000000000000ceilometer-6.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701406223024673 0ustar jenkinsjenkins00000000000000ceilometer-6.0.0/README.rst0000664000567000056710000000044212701406223016472 0ustar jenkinsjenkins00000000000000ceilometer ========== Release notes can be read online at: http://docs.openstack.org/developer/ceilometer/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/ceilometer/ The project home is at: http://launchpad.net/ceilometer ceilometer-6.0.0/functions.sh0000664000567000056710000000115212701406223017346 0ustar jenkinsjenkins00000000000000function clean_exit(){ local error_code="$?" rm -rf "$1" kill $(jobs -p) return $error_code } check_for_cmd () { if ! which "$1" >/dev/null 2>&1 then echo "Could not find $1 command" 1>&2 exit 1 fi } wait_for_line () { exit_code=1 while read line do echo "$line" | grep -q "$1" && exit_code=0 && break done < "$2" # Keep reading the fifo forever, otherwise the process would block cat "$2" >/dev/null & if [ $exit_code -eq 1 ]; then echo "Entries matching \"$1\" have not been found. Stopping the tests." exit $exit_code fi } ceilometer-6.0.0/.mailmap0000664000567000056710000000370612701406223016432 0ustar jenkinsjenkins00000000000000# Format is: # # Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan ceilometer-6.0.0/setup.py0000664000567000056710000000200412701406223016511 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In Python < 2.7.4, lazy loading of the `pbr` package will break # setuptools if other modules have registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) ceilometer-6.0.0/test-requirements.txt0000664000567000056710000000235312701406223021247 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8 hacking<0.11,>=0.10.0 Babel>=1.3 # BSD contextlib2>=0.4.0 # PSF License coverage>=3.6 # Apache-2.0 elasticsearch<2.0,>=1.3.0 # Apache-2.0 fixtures>=1.3.1 # Apache-2.0/BSD happybase!=0.7,>=0.5;python_version=='2.7' # MIT mock>=1.2 # BSD PyMySQL>=0.6.2 # MIT License os-win>=0.2.3 # Apache-2.0 oslo.cache>=1.5.0 # Apache-2.0 # Docs Requirements oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 oslotest>=1.10.0 # Apache-2.0 oslo.vmware>=1.16.0 # Apache-2.0 overtest>=0.10.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL pylint==1.4.5 # GNU GPL v2 pymongo!=3.1,>=3.0.2 # Apache-2.0 gnocchiclient>=2.1.0 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD sphinxcontrib-httpdomain # BSD sphinxcontrib-pecanwsme>=0.8 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT gabbi>=1.11.0 # Apache-2.0 requests-aws>=0.1.4 # BSD License (3 clause) tempest-lib>=0.14.0 # Apache-2.0 WebTest>=2.0 # MIT
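To exercise the suite against the pinned test dependencies above, the following is a minimal sketch; it assumes the usual companion requirements.txt at the repository root and a .testr.conf for testrepository (which is listed above) — in the gate this is normally driven through tox rather than run by hand.
# Hypothetical sketch: install dependencies and run the tests directly
# with testrepository from test-requirements.txt.
pip install -r requirements.txt -r test-requirements.txt
testr init   # create the local .testrepository store
testr run    # run the test suite as configured in .testr.conf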