ceilometer-10.0.0/0000775000175100017510000000000013236733440013751 5ustar zuulzuul00000000000000ceilometer-10.0.0/devstack/0000775000175100017510000000000013236733440015555 5ustar zuulzuul00000000000000ceilometer-10.0.0/devstack/upgrade/0000775000175100017510000000000013236733440017204 5ustar zuulzuul00000000000000ceilometer-10.0.0/devstack/upgrade/upgrade.sh0000777000175100017510000000502513236733243021177 0ustar zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-ceilometer`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Ceilometer # ================== # Locate ceilometer devstack plugin, the directory above the # grenade plugin. CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/functions source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache # Get ceilometer functions from devstack plugin source $CEILOMETER_DEVSTACK_DIR/settings # Print the commands being run so that we can see the command that triggers # an error. set -o xtrace # Install the target ceilometer source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install # calls upgrade-ceilometer for specific release upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database # NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but # currently it is not. CEILOMETER_BIN_DIR=$(dirname $(which ceilometer-upgrade)) $CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-gnocchi-resource-types || die $LINENO "ceilometer-upgrade error" # Start Ceilometer start_ceilometer # Note these are process names, not service names # Note(liamji): Disable the test for # "ceilometer-polling --polling-namespaces ipmi". In the test environment, # the ipmi is not ready, so the ceilometer-polling check would fail. 
ensure_services_started "ceilometer-polling --polling-namespaces compute" \ "ceilometer-polling --polling-namespaces central" \ ceilometer-agent-notification set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ceilometer-10.0.0/devstack/upgrade/shutdown.sh0000777000175100017510000000114513236733243021422 0ustar zuulzuul00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Locate the ceilometer plugin and get its functions CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $CEILOMETER_DEVSTACK_DIR/plugin.sh set -o xtrace stop_ceilometer # ensure everything is stopped SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification" ensure_services_stopped $SERVICES_DOWN ceilometer-10.0.0/devstack/upgrade/settings0000666000175100017510000000074413236733243020777 0ustar zuulzuul00000000000000register_project_for_upgrade ceilometer devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest ceilometer-10.0.0/devstack/plugin.sh0000666000175100017510000004021113236733243017410 0ustar zuulzuul00000000000000# Install and start **Ceilometer** service in devstack # # To enable Ceilometer in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer # # By default all ceilometer services are started (see devstack/settings) # except for the ceilometer-aipmi service. To disable a specific service # use the disable_service function. # # NOTE: Currently, there are two ways to get the IPMI based meters in # OpenStack. One way is to configure Ironic conductor to report those meters # for the nodes managed by Ironic and to have Ceilometer notification # agent to collect them. Ironic by default does NOT enable that reporting # functionality. So in order to do so, users need to set the option of # conductor.send_sensor_data to true in the ironic.conf configuration file # for the Ironic conductor service, and also enable the # ceilometer-anotification service. # # The other way is to use Ceilometer ipmi agent only to get the IPMI based # meters. To make use of the Ceilometer ipmi agent, it must be explicitly # enabled with the following setting: # # enable_service ceilometer-aipmi # # To avoid duplicated meters, users need to make sure to set the # option of conductor.send_sensor_data to false in the ironic.conf # configuration file if the node on which Ceilometer ipmi agent is running # is also managed by Ironic. # # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # # CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. # CEILOMETER_BACKEND: Database backend (e.g. 
'gnocchi', 'none') # CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. # CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming
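# For illustration only (example values, not requirements), these variables
# can be overridden from the same local.conf section as the plugin entry:
#
#   [[local|localrc]]
#   enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
#   CEILOMETER_BACKEND=gnocchi
#   CEILOMETER_PIPELINE_INTERVAL=600
#   CEILOMETER_COORDINATION_URL=redis://localhost:6379
#   CEILOMETER_EVENT_ALARM=True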
# Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # Test if any Ceilometer services are enabled # is_ceilometer_enabled function is_ceilometer_enabled { [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 return 1 } function gnocchi_service_url { echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST/metric" } # _ceilometer_install_redis() - Install the redis server and python lib. function _ceilometer_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis if is_suse; then # opensuse installs a multi-instance version of redis # and the admin is expected to install the required conf cp /etc/redis/default.conf.example /etc/redis/default.conf restart_service redis@default else restart_service redis fi fi pip_install_gr redis } # Configure mod_wsgi function _ceilometer_config_apache_wsgi { sudo mkdir -p $CEILOMETER_WSGI_DIR local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local apache_version=$(get_apache_version) local venv_path="" if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages" fi sudo cp $CEILOMETER_DIR/devstack/apache-ceilometer.template $ceilometer_apache_conf sudo sed -e " s|%PORT%|$CEILOMETER_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $ceilometer_apache_conf } # Install required services for coordination function _ceilometer_prepare_coordination { if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif [[ "${CEILOMETER_COORDINATOR_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" || "${CEILOMETER_BACKEND}" == "gnocchi" ]]; then _ceilometer_install_redis fi } # Install the python modules for inspecting nova virt instances function _ceilometer_prepare_virt_drivers { # Only install virt drivers if we're running nova compute if is_service_enabled n-cpu ; then if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then pip_install_gr libvirt-python fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then pip_install_gr oslo.vmware fi fi } # Create ceilometer related accounts in Keystone function ceilometer_create_accounts { # At this time, the /etc/openstack/clouds.yaml is available, # we could leverage that by setting OS_CLOUD OLD_OS_CLOUD=$OS_CLOUD export OS_CLOUD='devstack-admin' create_service_user "ceilometer" "admin" if is_service_enabled swift; then # Ceilometer needs ResellerAdmin role to access Swift account stats. get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME fi if ! [[ $DEVSTACK_PLUGINS =~ 'gnocchi' ]] && [ "$CEILOMETER_BACKEND" == "gnocchi" ]; then create_service_user "gnocchi" local gnocchi_service=$(get_or_create_service "gnocchi" \ "metric" "OpenStack Metric Service") get_or_create_endpoint $gnocchi_service \ "$REGION_NAME" \ "$(gnocchi_service_url)" \ "$(gnocchi_service_url)" \ "$(gnocchi_service_url)" fi export OS_CLOUD=$OLD_OS_CLOUD } function install_gnocchi { echo_summary "Installing Gnocchi" if [ $GNOCCHI_GIT_PATH ]; then pip_install -e $GNOCCHI_GIT_PATH[redis,${DATABASE_TYPE},keystone] uwsgi else pip_install gnocchi[redis,${DATABASE_TYPE},keystone] uwsgi fi } function configure_gnocchi { echo_summary "Configure Gnocchi" recreate_database gnocchi sudo install -d -o $STACK_USER -m 755 $GNOCCHI_CONF_DIR iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi` iniset $GNOCCHI_CONF storage driver redis iniset $GNOCCHI_CONF storage redis_url redis://localhost:6379 iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY" iniset $GNOCCHI_CONF api auth_mode keystone configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR sudo mkdir -p $GNOCCHI_AUTH_CACHE_DIR sudo chown $STACK_USER $GNOCCHI_AUTH_CACHE_DIR rm -f $GNOCCHI_AUTH_CACHE_DIR/* gnocchi-upgrade rm -f "$GNOCCHI_UWSGI_FILE" write_uwsgi_config "$GNOCCHI_UWSGI_FILE" "$CEILOMETER_BIN_DIR/gnocchi-api" "/metric" if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL" fi } # Activities to do before ceilometer has been installed. function preinstall_ceilometer { echo_summary "Preinstall not in virtualenv context. Skipping." } # cleanup_ceilometer() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_ceilometer { sudo rm -f "$CEILOMETER_CONF_DIR"/* sudo rmdir "$CEILOMETER_CONF_DIR" } # Set configuration for cache backend. # NOTE(cdent): This currently only works for redis. Still working # out how to express the other backends. function _ceilometer_configure_cache_backend { iniset $CEILOMETER_CONF cache enabled True iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND inidelete $CEILOMETER_CONF cache backend_argument iniadd $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL iniadd $CEILOMETER_CONF cache backend_argument distributed_lock:True if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then iniadd $CEILOMETER_CONF cache backend_argument db:0 iniadd $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 fi } # Set configuration for storage backend. function _ceilometer_configure_storage_backend { if [ "$CEILOMETER_BACKEND" = 'none' ] ; then # It's ok for the backend to be 'none' if panko is enabled. We do not # combine this condition with the outer if statement, so that the else # clause below is not executed. if ! is_service_enabled panko-api; then echo_summary "All Ceilometer backends seem disabled, set \$CEILOMETER_BACKEND to select one." fi elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then sed -i "s/gnocchi:\/\//gnocchi:\/\/?archive_policy=${GNOCCHI_ARCHIVE_POLICY}\&filter_project=gnocchi_swift/" $CEILOMETER_CONF_DIR/event_pipeline.yaml $CEILOMETER_CONF_DIR/pipeline.yaml ! [[ $DEVSTACK_PLUGINS =~ 'gnocchi' ]] && configure_gnocchi else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi # configure panko if is_service_enabled panko-api; then if ! 
grep -q 'panko' $CEILOMETER_CONF_DIR/event_pipeline.yaml ; then echo ' - panko://' >> $CEILOMETER_CONF_DIR/event_pipeline.yaml fi fi } # Configure Ceilometer function configure_ceilometer { local conffile iniset_rpc_backend ceilometer $CEILOMETER_CONF iniset $CEILOMETER_CONF oslo_messaging_notifications topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL iniset $CEILOMETER_CONF notification workload_partitioning True iniset $CEILOMETER_CONF notification workers $API_WORKERS fi if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then _ceilometer_configure_cache_backend fi # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. cp $CEILOMETER_DIR/etc/ceilometer/polling_all.yaml $CEILOMETER_CONF_DIR/polling.yaml cp $CEILOMETER_DIR/ceilometer/pipeline/data/*.yaml $CEILOMETER_CONF_DIR if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/polling.yaml fi if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml fi fi # The compute and central agents need these credentials in order to # call out to other services' public APIs. iniset $CEILOMETER_CONF service_credentials auth_type password iniset $CEILOMETER_CONF service_credentials user_domain_id default iniset $CEILOMETER_CONF service_credentials project_domain_id default iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $CEILOMETER_CONF service_credentials username ceilometer iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" fi _ceilometer_configure_storage_backend if is_service_enabled ceilometer-aipmi; then # Configure rootwrap for the ipmi agent configure_rootwrap ceilometer fi } # init_ceilometer() - Initialize etc. function init_ceilometer { # Create cache dir sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* } # Install Ceilometer. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. The context is not active during preinstall (when it would # otherwise makes sense to do the backend services). function install_ceilometer { if is_service_enabled ceilometer-acentral ceilometer-acompute ceilometer-anotification gnocchi-api gnocchi-metricd; then _ceilometer_prepare_coordination fi ! 
[[ $DEVSTACK_PLUGINS =~ 'gnocchi' ]] && [ "$CEILOMETER_BACKEND" = 'gnocchi' ] && install_gnocchi if is_service_enabled ceilometer-acompute ; then _ceilometer_prepare_virt_drivers fi case $CEILOMETER_BACKEND in gnocchi) extra=gnocchi;; esac setup_develop $CEILOMETER_DIR $extra sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR } # start_ceilometer() - Start running processes, including screen function start_ceilometer { if ! [[ $DEVSTACK_PLUGINS =~ 'gnocchi' ]] && [ "$CEILOMETER_BACKEND" = "gnocchi" ] ; then run_process gnocchi-api "$CEILOMETER_BIN_DIR/uwsgi --ini $GNOCCHI_UWSGI_FILE" "" run_process gnocchi-metricd "$CEILOMETER_BIN_DIR/gnocchi-metricd --config-file $GNOCCHI_CONF" wait_for_service 30 "$(gnocchi_service_url)" $CEILOMETER_BIN_DIR/ceilometer-upgrade fi run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" # run the notification agent after restarting apache as it needs # operational keystone if using gnocchi run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" # Start the compute agent late to allow time for the notification agent to # fully wake up and connect to the message bus. See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" fi } # stop_ceilometer() - Stop running processes function stop_ceilometer { # Kill the ceilometer screen windows for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification; do stop_process $serv done } # This is the main for plugin.sh if is_service_enabled ceilometer; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Ceilometer" preinstall_ceilometer elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Ceilometer" # Use stack_install_service here to account for virtualenv stack_install_service ceilometer elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Ceilometer" configure_ceilometer # Get ceilometer keystone settings in place ceilometer_create_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Ceilometer" # Tidy base for ceilometer init_ceilometer # Start the services start_ceilometer elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then iniset $TEMPEST_CONFIG telemetry alarm_granularity $CEILOMETER_ALARM_GRANULARITY fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Ceilometer" stop_ceilometer fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Ceilometer" cleanup_ceilometer fi fi # Restore xtrace $XTRACE ceilometer-10.0.0/devstack/settings0000666000175100017510000000620513236733243017346 0ustar zuulzuul00000000000000# turn on all the ceilometer services by default (except for ipmi pollster) # Pollsters enable_service ceilometer-acompute ceilometer-acentral # Notification Agent enable_service ceilometer-anotification # Default directories CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer 
CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-gnocchi} if [ "$CEILOMETER_BACKEND" = "gnocchi" ]; then enable_service gnocchi-api gnocchi-metricd fi # Gnocchi default archive_policy for Ceilometer # TODO(sileht): when Gnocchi 4.0 is out use the tarball instead GNOCCHI_GIT_PATH=${GNOCCHI_GIT_PATH:-git+https://github.com/gnocchixyz/gnocchi#egg=gnocchi} GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} GNOCCHI_CONF_DIR=${GNOCCHI_CONF_DIR:-/etc/gnocchi} GNOCCHI_CONF=${GNOCCHI_CONF:-${GNOCCHI_CONF_DIR}/gnocchi.conf} GNOCCHI_COORDINATOR_URL=${CEILOMETER_COORDINATOR_URL:-redis://localhost:6379} GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5} GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_UWSGI_FILE=${GNOCCHI_UWSGI_FILE:-${GNOCCHI_CONF_DIR}/uwsgi.ini} GNOCCHI_SERVICE_PROTOCOL=http GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} # FIXME(sileht): put 300 by default to match the archive policy # when the gate jobs have overridden this. CEILOMETER_ALARM_GRANULARITY=${CEILOMETER_ALARM_GRANULARITY:-60} # To enable OSprofiler, change the value of this variable to "notifications,profiler" CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379} CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} # Cache Options # NOTE(cdent): These are incomplete and specific to this testing. CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis} CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379} CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} # Set up default directories for client and middleware GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # Make sure the panko plugin is enabled before ceilometer function _ceilometer_check_for_storage { local plugins="${DEVSTACK_PLUGINS}" local plugin local seen_ceilometer=False for plugin in ${plugins//,/ }; do if [ "$plugin" == 'ceilometer' ]; then seen_ceilometer=True elif [ "$plugin" == 'panko' ] && [[ "$seen_ceilometer" == 'True' ]]; then die $LINENO "Panko must be enabled before ceilometer!" fi done } _ceilometer_check_for_storage # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ceilometer-10.0.0/devstack/files/0000775000175100017510000000000013236733440016657 5ustar zuulzuul00000000000000ceilometer-10.0.0/devstack/files/rpms/0000775000175100017510000000000013236733440017640 5ustar zuulzuul00000000000000ceilometer-10.0.0/devstack/files/rpms/ceilometer0000666000175100017510000000003013236733243021707 0ustar zuulzuul00000000000000selinux-policy-targeted ceilometer-10.0.0/devstack/README.rst0000666000175100017510000000154513236733243017254 0ustar zuulzuul00000000000000=============================== Enabling Ceilometer in DevStack =============================== 1. Download DevStack:: git clone https://git.openstack.org/openstack-dev/devstack cd devstack 2. Add this repo as an external repository in the ``local.conf`` file:: [[local|localrc]] enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer To use stable branches, make sure devstack is on that branch, and specify the branch name to enable_plugin, for example:: enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer stable/mitaka There are some options, such as CEILOMETER_BACKEND, defined in ``ceilometer/devstack/settings``, which can be used to configure the installation of Ceilometer. If you don't want to use their default values, you can set new ones in ``local.conf``.
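   For example (an illustrative snippet only; the variables and their defaults are documented in ``ceilometer/devstack/settings``), event alarming and the pipeline interval could be overridden with::

       [[local|localrc]]
       CEILOMETER_EVENT_ALARM=True
       CEILOMETER_PIPELINE_INTERVAL=600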
3. Run ``stack.sh``. ceilometer-10.0.0/ceilometer.egg-info/0000775000175100017510000000000013236733440017573 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer.egg-info/entry_points.txt0000664000175100017510000003644713236733435023103 0ustar zuulzuul00000000000000[ceilometer.builder.poll.central] hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster [ceilometer.compute.virt] hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector [ceilometer.discover.central] endpoint = ceilometer.polling.discovery.endpoint:EndpointDiscovery fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery images = ceilometer.image.discovery:ImagesDiscovery ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery lb_members = ceilometer.network.services.discovery:LBMembersDiscovery lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery tenant = ceilometer.polling.discovery.tenant:TenantDiscovery tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO volume_backups = ceilometer.volume.discovery:VolumeBackupsDiscovery volume_snapshots = ceilometer.volume.discovery:VolumeSnapshotsDiscovery volumes = ceilometer.volume.discovery:VolumeDiscovery vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery [ceilometer.discover.compute] local_instances = ceilometer.compute.discovery:InstanceDiscovery [ceilometer.discover.ipmi] local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery [ceilometer.event.publisher] gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher test = ceilometer.publisher.test:TestPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher [ceilometer.event.trait_plugin] bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin split = ceilometer.event.trait_plugins:SplitterTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin [ceilometer.hardware.inspectors] snmp = ceilometer.hardware.inspector.snmp:SNMPInspector [ceilometer.notification.pipeline] event = ceilometer.pipeline.event:EventPipelineManager meter = 
ceilometer.pipeline.sample:SamplePipelineManager [ceilometer.poll.central] image.size = ceilometer.image.glance:ImageSizePollster ip.floating = ceilometer.network.floatingip:FloatingIPPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster port = ceilometer.network.statistics.port_v2:PortPollster port.receive.bytes = ceilometer.network.statistics.port_v2:PortPollsterReceiveBytes port.receive.drops = ceilometer.network.statistics.port_v2:PortPollsterReceiveDrops port.receive.errors = ceilometer.network.statistics.port_v2:PortPollsterReceiveErrors port.receive.packets = ceilometer.network.statistics.port_v2:PortPollsterReceivePackets port.transmit.bytes = ceilometer.network.statistics.port_v2:PortPollsterTransmitBytes port.transmit.packets = ceilometer.network.statistics.port_v2:PortPollsterTransmitPackets port.uptime = ceilometer.network.statistics.port_v2:PortPollsterUptime radosgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster radosgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster radosgw.objects = ceilometer.objectstore.rgw:ObjectsPollster radosgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster radosgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster radosgw.usage = ceilometer.objectstore.rgw:UsagePollster rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster rgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster switch = ceilometer.network.statistics.switch:SWPollster switch.flow = ceilometer.network.statistics.flow:FlowPollster switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes switch.flow.duration.nanoseconds = 
ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets switch.port = ceilometer.network.statistics.port:PortPollster switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets switch.port.uptime = ceilometer.network.statistics.port:PortPollsterUptime switch.ports = ceilometer.network.statistics.switch:SwitchPollsterPorts switch.table = ceilometer.network.statistics.table:TablePollster switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets volume.backup.size = ceilometer.volume.cinder:VolumeBackupSize volume.size = ceilometer.volume.cinder:VolumeSizePollster volume.snapshot.size = ceilometer.volume.cinder:VolumeSnapshotSize [ceilometer.poll.compute] cpu = ceilometer.compute.pollsters.instance_stats:CPUPollster cpu_l3_cache = ceilometer.compute.pollsters.instance_stats:CPUL3CachePollster cpu_util = ceilometer.compute.pollsters.instance_stats:CPUUtilPollster disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster disk.device.read.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskReadLatencyPollster disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster 
disk.device.write.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskWriteLatencyPollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster memory.bandwidth.local = ceilometer.compute.pollsters.instance_stats:MemoryBandwidthLocalPollster memory.bandwidth.total = ceilometer.compute.pollsters.instance_stats:MemoryBandwidthTotalPollster memory.resident = ceilometer.compute.pollsters.instance_stats:MemoryResidentPollster memory.swap.in = ceilometer.compute.pollsters.instance_stats:MemorySwapInPollster memory.swap.out = ceilometer.compute.pollsters.instance_stats:MemorySwapOutPollster memory.usage = ceilometer.compute.pollsters.instance_stats:MemoryUsagePollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster network.incoming.packets.drop = ceilometer.compute.pollsters.net:IncomingDropPollster network.incoming.packets.error = ceilometer.compute.pollsters.net:IncomingErrorsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster network.outgoing.packets.drop = ceilometer.compute.pollsters.net:OutgoingDropPollster network.outgoing.packets.error = ceilometer.compute.pollsters.net:OutgoingErrorsPollster perf.cache.misses = ceilometer.compute.pollsters.instance_stats:PerfCacheMissesPollster perf.cache.references = ceilometer.compute.pollsters.instance_stats:PerfCacheReferencesPollster perf.cpu.cycles = ceilometer.compute.pollsters.instance_stats:PerfCPUCyclesPollster perf.instructions = ceilometer.compute.pollsters.instance_stats:PerfInstructionsPollster [ceilometer.poll.ipmi] hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster hardware.ipmi.node.power = 
ceilometer.ipmi.pollsters.node:PowerPollster hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster [ceilometer.sample.endpoint] _sample = ceilometer.telemetry.notifications:TelemetryIpc hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse meter = ceilometer.meter.notifications:ProcessMeterNotifications [ceilometer.sample.publisher] file = ceilometer.publisher.file:FilePublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher test = ceilometer.publisher.test:TestPublisher udp = ceilometer.publisher.udp:UDPPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher [ceilometer.transformer] accumulator = ceilometer.transformer.accumulator:TransformerAccumulator aggregator = ceilometer.transformer.conversions:AggregatorTransformer arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer delta = ceilometer.transformer.conversions:DeltaTransformer rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer unit_conversion = ceilometer.transformer.conversions:ScalingTransformer [console_scripts] ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-polling = ceilometer.cmd.polling:main ceilometer-rootwrap = oslo_rootwrap.cmd:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-upgrade = ceilometer.cmd.storage:upgrade [network.statistics.drivers] opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver [oslo.config.opts] ceilometer = ceilometer.opts:list_opts ceilometer-auth = ceilometer.opts:list_keystoneauth_opts ceilometer-10.0.0/ceilometer.egg-info/PKG-INFO0000664000175100017510000000460513236733435020701 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: ceilometer Version: 10.0.0 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ========== Ceilometer ========== -------- Overview -------- Ceilometer is a data collection service that collects event and metering data by monitoring notifications sent from OpenStack services. It publishes collected data to various targets including data stores and message queues. Ceilometer is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
------------- Documentation ------------- Release notes are available at https://releases.openstack.org/teams/telemetry.html Developer documentation is available at https://docs.openstack.org/ceilometer/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/ceilometer Code Repository --------------- - Server: https://github.com/openstack/ceilometer Bug Tracking ------------ - Bugs: https://bugs.launchpad.net/ceilometer IRC --- IRC Channel: #openstack-telemetry on `Freenode`_. Mailinglist ----------- Project use http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev as the mailinglist. Please use tag ``[Ceilometer]`` in the subject for new threads. .. _Freenode: https://freenode.net/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Topic :: System :: Monitoring ceilometer-10.0.0/ceilometer.egg-info/SOURCES.txt0000664000175100017510000005177713236733440021500 0ustar zuulzuul00000000000000.coveragerc .mailmap .testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg bindep.txt requirements.txt run-tests.sh setup.cfg setup.py test-requirements.txt tox.ini ceilometer/__init__.py ceilometer/agent.py ceilometer/declarative.py ceilometer/gnocchi_client.py ceilometer/i18n.py ceilometer/keystone_client.py ceilometer/messaging.py ceilometer/middleware.py ceilometer/neutron_client.py ceilometer/notification.py ceilometer/nova_client.py ceilometer/opts.py ceilometer/sample.py ceilometer/service.py ceilometer/utils.py ceilometer/version.py ceilometer.egg-info/PKG-INFO ceilometer.egg-info/SOURCES.txt ceilometer.egg-info/dependency_links.txt ceilometer.egg-info/entry_points.txt ceilometer.egg-info/not-zip-safe ceilometer.egg-info/pbr.json ceilometer.egg-info/requires.txt ceilometer.egg-info/top_level.txt ceilometer/cmd/__init__.py ceilometer/cmd/agent_notification.py ceilometer/cmd/polling.py ceilometer/cmd/sample.py ceilometer/cmd/storage.py ceilometer/compute/__init__.py ceilometer/compute/discovery.py ceilometer/compute/pollsters/__init__.py ceilometer/compute/pollsters/disk.py ceilometer/compute/pollsters/instance_stats.py ceilometer/compute/pollsters/net.py ceilometer/compute/pollsters/util.py ceilometer/compute/virt/__init__.py ceilometer/compute/virt/inspector.py ceilometer/compute/virt/hyperv/__init__.py ceilometer/compute/virt/hyperv/inspector.py ceilometer/compute/virt/libvirt/__init__.py ceilometer/compute/virt/libvirt/inspector.py ceilometer/compute/virt/libvirt/utils.py ceilometer/compute/virt/vmware/__init__.py ceilometer/compute/virt/vmware/inspector.py ceilometer/compute/virt/vmware/vsphere_operations.py ceilometer/compute/virt/xenapi/__init__.py ceilometer/compute/virt/xenapi/inspector.py ceilometer/data/meters.d/meters.yaml ceilometer/event/__init__.py ceilometer/event/converter.py ceilometer/event/models.py ceilometer/event/trait_plugins.py ceilometer/hacking/__init__.py ceilometer/hacking/checks.py ceilometer/hardware/__init__.py ceilometer/hardware/discovery.py ceilometer/hardware/inspector/__init__.py 
ceilometer/hardware/inspector/base.py ceilometer/hardware/inspector/snmp.py ceilometer/hardware/pollsters/__init__.py ceilometer/hardware/pollsters/generic.py ceilometer/hardware/pollsters/util.py ceilometer/hardware/pollsters/data/snmp.yaml ceilometer/image/__init__.py ceilometer/image/discovery.py ceilometer/image/glance.py ceilometer/ipmi/__init__.py ceilometer/ipmi/notifications/__init__.py ceilometer/ipmi/notifications/ironic.py ceilometer/ipmi/platform/__init__.py ceilometer/ipmi/platform/exception.py ceilometer/ipmi/platform/intel_node_manager.py ceilometer/ipmi/platform/ipmi_sensor.py ceilometer/ipmi/platform/ipmitool.py ceilometer/ipmi/pollsters/__init__.py ceilometer/ipmi/pollsters/node.py ceilometer/ipmi/pollsters/sensor.py ceilometer/locale/de/LC_MESSAGES/ceilometer.po ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po ceilometer/locale/es/LC_MESSAGES/ceilometer.po ceilometer/locale/fr/LC_MESSAGES/ceilometer.po ceilometer/locale/it/LC_MESSAGES/ceilometer.po ceilometer/locale/ja/LC_MESSAGES/ceilometer.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po ceilometer/locale/ru/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po ceilometer/meter/__init__.py ceilometer/meter/notifications.py ceilometer/network/__init__.py ceilometer/network/floatingip.py ceilometer/network/services/__init__.py ceilometer/network/services/base.py ceilometer/network/services/discovery.py ceilometer/network/services/fwaas.py ceilometer/network/services/lbaas.py ceilometer/network/services/vpnaas.py ceilometer/network/statistics/__init__.py ceilometer/network/statistics/driver.py ceilometer/network/statistics/flow.py ceilometer/network/statistics/port.py ceilometer/network/statistics/port_v2.py ceilometer/network/statistics/switch.py ceilometer/network/statistics/table.py ceilometer/network/statistics/opencontrail/__init__.py ceilometer/network/statistics/opencontrail/client.py ceilometer/network/statistics/opencontrail/driver.py ceilometer/network/statistics/opendaylight/__init__.py ceilometer/network/statistics/opendaylight/client.py ceilometer/network/statistics/opendaylight/driver.py ceilometer/objectstore/__init__.py ceilometer/objectstore/rgw.py ceilometer/objectstore/rgw_client.py ceilometer/objectstore/swift.py ceilometer/pipeline/__init__.py ceilometer/pipeline/base.py ceilometer/pipeline/event.py ceilometer/pipeline/sample.py ceilometer/pipeline/data/event_definitions.yaml ceilometer/pipeline/data/event_pipeline.yaml ceilometer/pipeline/data/pipeline.yaml ceilometer/polling/__init__.py ceilometer/polling/manager.py ceilometer/polling/plugin_base.py ceilometer/polling/discovery/__init__.py ceilometer/polling/discovery/endpoint.py ceilometer/polling/discovery/localnode.py ceilometer/polling/discovery/tenant.py ceilometer/publisher/__init__.py ceilometer/publisher/file.py ceilometer/publisher/gnocchi.py ceilometer/publisher/http.py ceilometer/publisher/messaging.py ceilometer/publisher/test.py ceilometer/publisher/udp.py ceilometer/publisher/utils.py ceilometer/publisher/zaqar.py ceilometer/publisher/data/gnocchi_resources.yaml ceilometer/telemetry/__init__.py ceilometer/telemetry/notifications.py ceilometer/tests/__init__.py ceilometer/tests/base.py ceilometer/tests/unit/__init__.py ceilometer/tests/unit/pipeline_base.py ceilometer/tests/unit/test_bin.py ceilometer/tests/unit/test_declarative.py ceilometer/tests/unit/test_decoupled_pipeline.py 
ceilometer/tests/unit/test_event_pipeline.py ceilometer/tests/unit/test_messaging.py ceilometer/tests/unit/test_middleware.py ceilometer/tests/unit/test_neutronclient.py ceilometer/tests/unit/test_neutronclient_lbaas_v2.py ceilometer/tests/unit/test_notification.py ceilometer/tests/unit/test_novaclient.py ceilometer/tests/unit/test_polling.py ceilometer/tests/unit/test_sample.py ceilometer/tests/unit/compute/__init__.py ceilometer/tests/unit/compute/test_discovery.py ceilometer/tests/unit/compute/pollsters/__init__.py ceilometer/tests/unit/compute/pollsters/base.py ceilometer/tests/unit/compute/pollsters/test_cpu.py ceilometer/tests/unit/compute/pollsters/test_diskio.py ceilometer/tests/unit/compute/pollsters/test_location_metadata.py ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer/tests/unit/compute/pollsters/test_net.py ceilometer/tests/unit/compute/pollsters/test_perf.py ceilometer/tests/unit/compute/virt/__init__.py ceilometer/tests/unit/compute/virt/hyperv/__init__.py ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py ceilometer/tests/unit/compute/virt/libvirt/__init__.py ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/__init__.py ceilometer/tests/unit/compute/virt/vmware/test_inspector.py ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py ceilometer/tests/unit/compute/virt/xenapi/__init__.py ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py ceilometer/tests/unit/event/__init__.py ceilometer/tests/unit/event/test_converter.py ceilometer/tests/unit/event/test_endpoint.py ceilometer/tests/unit/event/test_trait_plugins.py ceilometer/tests/unit/hardware/__init__.py ceilometer/tests/unit/hardware/inspector/__init__.py ceilometer/tests/unit/hardware/inspector/test_inspector.py ceilometer/tests/unit/hardware/inspector/test_snmp.py ceilometer/tests/unit/hardware/pollsters/__init__.py ceilometer/tests/unit/hardware/pollsters/test_generic.py ceilometer/tests/unit/hardware/pollsters/test_util.py ceilometer/tests/unit/image/__init__.py ceilometer/tests/unit/image/test_glance.py ceilometer/tests/unit/ipmi/__init__.py ceilometer/tests/unit/ipmi/notifications/__init__.py ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py ceilometer/tests/unit/ipmi/notifications/test_ironic.py ceilometer/tests/unit/ipmi/platform/__init__.py ceilometer/tests/unit/ipmi/platform/fake_utils.py ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py ceilometer/tests/unit/ipmi/pollsters/__init__.py ceilometer/tests/unit/ipmi/pollsters/base.py ceilometer/tests/unit/ipmi/pollsters/test_node.py ceilometer/tests/unit/ipmi/pollsters/test_sensor.py ceilometer/tests/unit/meter/__init__.py ceilometer/tests/unit/meter/test_meter_plugins.py ceilometer/tests/unit/meter/test_notifications.py ceilometer/tests/unit/network/__init__.py ceilometer/tests/unit/network/test_floating_ip.py ceilometer/tests/unit/network/services/__init__.py ceilometer/tests/unit/network/services/test_fwaas.py ceilometer/tests/unit/network/services/test_lbaas.py ceilometer/tests/unit/network/services/test_lbaas_v2.py ceilometer/tests/unit/network/services/test_vpnaas.py ceilometer/tests/unit/network/statistics/__init__.py ceilometer/tests/unit/network/statistics/test_flow.py ceilometer/tests/unit/network/statistics/test_port.py ceilometer/tests/unit/network/statistics/test_port_v2.py 
ceilometer/tests/unit/network/statistics/test_statistics.py ceilometer/tests/unit/network/statistics/test_switch.py ceilometer/tests/unit/network/statistics/test_table.py ceilometer/tests/unit/network/statistics/opencontrail/__init__.py ceilometer/tests/unit/network/statistics/opencontrail/test_client.py ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py ceilometer/tests/unit/network/statistics/opendaylight/__init__.py ceilometer/tests/unit/network/statistics/opendaylight/test_client.py ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py ceilometer/tests/unit/objectstore/__init__.py ceilometer/tests/unit/objectstore/test_rgw.py ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer/tests/unit/objectstore/test_swift.py ceilometer/tests/unit/polling/__init__.py ceilometer/tests/unit/polling/test_discovery.py ceilometer/tests/unit/polling/test_manager.py ceilometer/tests/unit/publisher/__init__.py ceilometer/tests/unit/publisher/test_file.py ceilometer/tests/unit/publisher/test_gnocchi.py ceilometer/tests/unit/publisher/test_http.py ceilometer/tests/unit/publisher/test_messaging_publisher.py ceilometer/tests/unit/publisher/test_udp.py ceilometer/tests/unit/publisher/test_utils.py ceilometer/tests/unit/publisher/test_zaqar.py ceilometer/tests/unit/transformer/__init__.py ceilometer/tests/unit/transformer/test_conversions.py ceilometer/tests/unit/volume/__init__.py ceilometer/tests/unit/volume/test_cinder.py ceilometer/transformer/__init__.py ceilometer/transformer/accumulator.py ceilometer/transformer/arithmetic.py ceilometer/transformer/conversions.py ceilometer/volume/__init__.py ceilometer/volume/cinder.py ceilometer/volume/discovery.py devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/rpms/ceilometer devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/admin/index.rst doc/source/admin/telemetry-best-practices.rst doc/source/admin/telemetry-data-collection.rst doc/source/admin/telemetry-data-pipelines.rst doc/source/admin/telemetry-events.rst doc/source/admin/telemetry-measurements.rst doc/source/admin/telemetry-system-architecture.rst doc/source/admin/telemetry-troubleshooting-guide.rst doc/source/configuration/index.rst doc/source/contributor/1-agents.png doc/source/contributor/2-1-collection-notification.png doc/source/contributor/2-2-collection-poll.png doc/source/contributor/2-accessmodel.png doc/source/contributor/3-Pipeline.png doc/source/contributor/4-Transformer.png doc/source/contributor/5-multi-publish.png doc/source/contributor/6-storagemodel.png doc/source/contributor/architecture.rst doc/source/contributor/ceilo-arch.png doc/source/contributor/ceilo-gnocchi-arch.png doc/source/contributor/devstack.rst doc/source/contributor/events.rst doc/source/contributor/gmr.rst doc/source/contributor/index.rst doc/source/contributor/measurements.rst doc/source/contributor/new_resource_types.rst doc/source/contributor/overview.rst doc/source/contributor/plugins.rst doc/source/contributor/testing.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-base-config-common.inc doc/source/install/install-base-obs.rst doc/source/install/install-base-prereq-common.inc doc/source/install/install-base-rdo.rst doc/source/install/install-base-ubuntu.rst doc/source/install/install-compute-common.inc doc/source/install/install-compute-obs.rst doc/source/install/install-compute-rdo.rst 
doc/source/install/install-compute-ubuntu.rst doc/source/install/install-compute.rst doc/source/install/install-controller.rst doc/source/install/install-gnocchi.inc doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/install/cinder/install-cinder-config-common.inc doc/source/install/cinder/install-cinder-obs.rst doc/source/install/cinder/install-cinder-rdo.rst doc/source/install/cinder/install-cinder-ubuntu.rst doc/source/install/glance/install-glance-obs.rst doc/source/install/glance/install-glance-rdo.rst doc/source/install/glance/install-glance-ubuntu.rst doc/source/install/heat/install-heat-obs.rst doc/source/install/heat/install-heat-rdo.rst doc/source/install/heat/install-heat-ubuntu.rst doc/source/install/neutron/install-neutron-obs.rst doc/source/install/neutron/install-neutron-rdo.rst doc/source/install/neutron/install-neutron-ubuntu.rst doc/source/install/swift/install-swift-config-common.inc doc/source/install/swift/install-swift-obs.rst doc/source/install/swift/install-swift-prereq-common.inc doc/source/install/swift/install-swift-rdo.rst doc/source/install/swift/install-swift-ubuntu.rst doc/source/releasenotes/folsom.rst doc/source/releasenotes/index.rst etc/ceilometer/ceilometer-config-generator.conf etc/ceilometer/polling.yaml etc/ceilometer/polling_all.yaml etc/ceilometer/rootwrap.conf etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml etc/ceilometer/examples/osprofiler_event_definitions.yaml etc/ceilometer/rootwrap.d/ipmi.filters playbooks/legacy/grenade-dsvm-ceilometer/post.yaml playbooks/legacy/grenade-dsvm-ceilometer/run.yaml playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml releasenotes/notes/.placeholder releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml releasenotes/notes/always-requeue-7a2df9243987ab67.yaml releasenotes/notes/batch-messaging-d126cc525879d58e.yaml releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml releasenotes/notes/drop-collector-4c207b35d67b2977.yaml releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml 
releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml releasenotes/notes/event-type-race-c295baf7f1661eab.yaml releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml releasenotes/notes/keystone-v3-fab1e257c5672965.yaml releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml releasenotes/notes/polling-definition-efffb92e3810e571.yaml releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml releasenotes/notes/remove-eventlet-6738321434b60c78.yaml releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml 
releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml releasenotes/notes/support-None-query-45abaae45f08eda4.yaml releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po tools/__init__.py tools/pretty_tox.sh tools/send_test_data.pyceilometer-10.0.0/ceilometer.egg-info/top_level.txt0000664000175100017510000000001313236733435022323 0ustar zuulzuul00000000000000ceilometer ceilometer-10.0.0/ceilometer.egg-info/pbr.json0000664000175100017510000000005613236733435021256 0ustar zuulzuul00000000000000{"git_version": "46aee0a", "is_release": true}ceilometer-10.0.0/ceilometer.egg-info/requires.txt0000664000175100017510000000140113236733435022173 0ustar zuulzuul00000000000000cachetools>=1.1.0 cotyledon>=1.3.0 futures>=3.0 futurist>=0.11.0 debtcollector>=1.2.0 jsonpath-rw-ext>=0.1.9 lxml>=2.3 monotonic msgpack-python>=0.4.0 oslo.concurrency>=3.5.0 oslo.config>=3.22.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.reports>=0.6.0 oslo.rootwrap>=2.0.0 pbr>=1.6 oslo.messaging>=5.12.0 oslo.utils>=3.5.0 pysnmp<5.0.0,>=4.2.3 python-glanceclient>=2.0.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 keystoneauth1>=2.1.0 python-neutronclient>=4.2.0 python-novaclient!=2.33.0,>=2.29.0 python-swiftclient>=2.2.0 python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 PyYAML>=3.1.0 requests!=2.9.0,>=2.8.1 six>=1.9.0 stevedore>=1.9.0 tenacity>=3.2.1 tooz[zake]>=1.47.0 os-xenapi>=0.1.1 [gnocchi] oslo.cache>=1.5.0 gnocchiclient>=7.0.0 [zaqar] python-zaqarclient>=1.0.0 ceilometer-10.0.0/ceilometer.egg-info/not-zip-safe0000664000175100017510000000000113236733404022021 0ustar zuulzuul00000000000000 ceilometer-10.0.0/ceilometer.egg-info/dependency_links.txt0000664000175100017510000000000113236733435023645 0ustar zuulzuul00000000000000 ceilometer-10.0.0/.zuul.yaml0000666000175100017510000000317613236733243015724 0ustar zuulzuul00000000000000- job: name: grenade-dsvm-ceilometer parent: legacy-dsvm-base run: playbooks/legacy/grenade-dsvm-ceilometer/run.yaml post-run: playbooks/legacy/grenade-dsvm-ceilometer/post.yaml timeout: 10800 required-projects: - 
openstack-dev/grenade - openstack-infra/devstack-gate - openstack/ceilometer - project: check: jobs: - grenade-dsvm-ceilometer: irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - telemetry-dsvm-integration gate: jobs: - grenade-dsvm-ceilometer: irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - telemetry-dsvm-integration experimental: jobs: # TripleO jobs that deploy Telemetry. # Note we don't use a project-template here, so it's easier # to disable voting on one specific job if things go wrong. # tripleo-ci-centos-7-scenario00(1|2)-multinode-oooq will only # run on stable/pike while the -container will run in Queens # and beyond. # If you need any support to debug these jobs in case of # failures, please reach us on #tripleo IRC channel. # NOTE(sileht): These job takes 3 hours and fail most of the times. # While other jobs take 20-30 minutes # We can reenable it when they take less times. - tripleo-ci-centos-7-scenario001-multinode-oooq - tripleo-ci-centos-7-scenario001-multinode-oooq-container - tripleo-ci-centos-7-scenario002-multinode-oooq - tripleo-ci-centos-7-scenario002-multinode-oooq-container ceilometer-10.0.0/tox.ini0000666000175100017510000000460413236733243015273 0ustar zuulzuul00000000000000[tox] minversion = 1.8 skipsdist = True envlist = py{27,35},pep8 [testenv] deps = .[gnocchi,zaqar] -r{toxinidir}/test-requirements.txt # NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt install_command = pip install -U {opts} {packages} usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=ceilometer/tests/unit CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:none} CEILOMETER_TEST_DEBUG={env:CEILOMETER_TEST_DEBUG:} debug: CEILOMETER_TEST_DEBUG=True passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE CEILOMETER_* commands = bash -x {toxinidir}/run-tests.sh "{posargs}" oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf whitelist_externals = bash [testenv:integration] setenv = OS_TEST_PATH=./ceilometer/tests/integration OS_TEST_TIMEOUT=2400 GABBI_LIVE_FAIL_IF_NO_TEST=1 passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* PANKO_* GLANCE_* NOVA_* ADMIN_* # NOTE(sileht): run gabbi-run to failfast in case of error because testr # doesn't support --failfast, but we loose the testr report. commands = bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml' [testenv:cover] setenv = OS_TEST_PATH=ceilometer/tests commands = python setup.py testr --slowest --coverage --testr-args="{posargs}" [testenv:pep8] deps = hacking<0.13,>=0.12 doc8 commands = flake8 doc8 {posargs} # Check that .po and .pot files are valid: bash -c "find ceilometer -type f -regex '.*\.pot?' 
-print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf [testenv:docs] commands = python setup.py build_sphinx setenv = PYTHONHASHSEED=0 [testenv:venv] commands = {posargs} setenv = PYTHONHASHSEED=0 [doc8] ignore = D000 ignore-path = .venv,.git,.tox,*ceilometer/locale*,*lib/python*,ceilometer.egg*,doc/build,doc/source/api,releasenotes/* [flake8] ignore = exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,install-guide show-source = True [hacking] import_exceptions = ceilometer.i18n local-check-factory = ceilometer.hacking.checks.factory ceilometer-10.0.0/ceilometer/0000775000175100017510000000000013236733440016101 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/0000775000175100017510000000000013236733440017037 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/notifications/0000775000175100017510000000000013236733440021710 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/notifications/ironic.py0000666000175100017510000001246213236733243023555 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing hardware sensor data sample messages from notification events. """ from oslo_log import log from ceilometer.pipeline import sample as endpoint from ceilometer import sample LOG = log.getLogger(__name__) # Map unit name to SI UNIT_MAP = { 'Watts': 'W', 'Volts': 'V', } def validate_reading(data): """Some sensors read "Disabled".""" return data != 'Disabled' def transform_id(data): return data.lower().replace(' ', '_') def parse_reading(data): try: volume, unit = data.split(' ', 1) unit = unit.rsplit(' ', 1)[-1] return float(volume), UNIT_MAP.get(unit, unit) except ValueError: raise InvalidSensorData('unable to parse sensor reading: %s' % data) class InvalidSensorData(ValueError): pass class SensorNotification(endpoint.SampleEndpoint): """A generic class for extracting samples from sensor data notifications. A notification message can contain multiple samples from multiple sensors, all with the same basic structure: the volume for the sample is found as part of the value of a 'Sensor Reading' key. The unit is in the same value. Subclasses exist solely to allow flexibility with stevedore configuration. """ event_types = ['hardware.ipmi.*'] metric = None def _get_sample(self, message): try: return (payload for _, payload in message['payload'][self.metric].items()) except KeyError: return [] @staticmethod def _package_payload(message, payload): # NOTE(chdent): How much of the payload should we keep? # FIXME(gordc): ironic adds timestamp and event_type in its payload # which we are using below. we should probably just use oslo.messaging # values instead? 
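        # Rough shape of the dict assembled below (values are hypothetical,
        # the keys come straight from the code that follows):
        #   {'publisher_id': 'ironic.<host>',
        #    'timestamp': '2018-02-08T00:00:00',
        #    'event_type': 'hardware.ipmi.metrics.update',
        #    'user_id': None,
        #    'project_id': None,
        #    'payload': <one sensor reading dict, plus the 'node' UUID>}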
payload['node'] = message['payload']['node_uuid'] info = {'publisher_id': message['publisher_id'], 'timestamp': message['payload']['timestamp'], 'event_type': message['payload']['event_type'], 'user_id': message['payload'].get('user_id'), 'project_id': message['payload'].get('project_id'), 'payload': payload} return info def build_sample(self, message): """Read and process a notification. The guts of a message are in dict value of a 'payload' key which then itself has a payload key containing a dict of multiple sensor readings. If expected keys in the payload are missing or values are not in the expected form for transformations, KeyError and ValueError are caught and the current sensor payload is skipped. """ payloads = self._get_sample(message['payload']) for payload in payloads: try: # Provide a fallback resource_id in case parts are missing. resource_id = 'missing id' try: resource_id = '%(nodeid)s-%(sensorid)s' % { 'nodeid': message['payload']['node_uuid'], 'sensorid': transform_id(payload['Sensor ID']) } except KeyError as exc: raise InvalidSensorData('missing key in payload: %s' % exc) info = self._package_payload(message, payload) try: sensor_reading = info['payload']['Sensor Reading'] except KeyError as exc: raise InvalidSensorData( "missing 'Sensor Reading' in payload" ) if validate_reading(sensor_reading): volume, unit = parse_reading(sensor_reading) yield sample.Sample.from_notification( name='hardware.ipmi.%s' % self.metric.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, resource_id=resource_id, message=info, user_id=info['user_id'], project_id=info['project_id'], timestamp=info['timestamp']) except InvalidSensorData as exc: LOG.warning( 'invalid sensor data for %(resource)s: %(error)s' % dict(resource=resource_id, error=exc) ) continue class TemperatureSensorNotification(SensorNotification): metric = 'Temperature' class CurrentSensorNotification(SensorNotification): metric = 'Current' class FanSensorNotification(SensorNotification): metric = 'Fan' class VoltageSensorNotification(SensorNotification): metric = 'Voltage' ceilometer-10.0.0/ceilometer/ipmi/notifications/__init__.py0000666000175100017510000000000013236733243024012 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/pollsters/0000775000175100017510000000000013236733440021066 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/pollsters/sensor.py0000666000175100017510000000741513236733243022763 0ustar zuulzuul00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
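"""Pollsters reading hardware sensor data over the local IPMI interface.

Each pollster below exposes one sensor type (Temperature, Current, Fan or
Voltage) as hardware.ipmi.<type> gauge samples, reusing the reading parser
from ceilometer.ipmi.notifications.ironic.
"""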
from oslo_log import log from ceilometer.i18n import _ from ceilometer.ipmi.notifications import ironic as parser from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmi_sensor from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) class InvalidSensorData(ValueError): pass class SensorPollster(plugin_base.PollsterBase): METRIC = None def setup_environment(self): super(SensorPollster, self).setup_environment() self.ipmi = ipmi_sensor.IPMISensor() self.polling_failures = 0 # Do not load this extension if no IPMI support if not self.ipmi.ipmi_support: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' @staticmethod def _get_sensor_types(data, sensor_type): try: return (sensor_type_data for _, sensor_type_data in data[sensor_type].items()) except KeyError: return [] def get_samples(self, manager, cache, resources): # Only one resource for IPMI pollster try: stats = self.ipmi.read_sensor_any(self.METRIC) except ipmiexcept.IPMIException: self.polling_failures += 1 LOG.warning(_( 'Polling %(mtr)s sensor failed for %(cnt)s times!') % ({'mtr': self.METRIC, 'cnt': self.polling_failures})) if 0 <= self.conf.ipmi.polling_retry < self.polling_failures: LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 sensor_type_data = self._get_sensor_types(stats, self.METRIC) for sensor_data in sensor_type_data: # Continue if sensor_data is not parseable. try: sensor_reading = sensor_data['Sensor Reading'] sensor_id = sensor_data['Sensor ID'] except KeyError: continue if not parser.validate_reading(sensor_reading): continue try: volume, unit = parser.parse_reading(sensor_reading) except parser.InvalidSensorData: continue resource_id = '%(host)s-%(sensor-id)s' % { 'host': self.conf.host, 'sensor-id': parser.transform_id(sensor_id) } metadata = { 'node': self.conf.host } yield sample.Sample( name='hardware.ipmi.%s' % self.METRIC.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, resource_metadata=metadata) class TemperatureSensorPollster(SensorPollster): METRIC = 'Temperature' class CurrentSensorPollster(SensorPollster): METRIC = 'Current' class FanSensorPollster(SensorPollster): METRIC = 'Fan' class VoltageSensorPollster(SensorPollster): METRIC = 'Voltage' ceilometer-10.0.0/ceilometer/ipmi/pollsters/node.py0000666000175100017510000001166713236733243022403 0ustar zuulzuul00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
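"""Pollsters reading platform statistics from Intel Node Manager.

The pollsters below report power, inlet/outlet temperature, airflow and
CUPS utilization of the local node as hardware.ipmi.node.* gauge samples.
"""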
import abc from oslo_log import log import six from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): def setup_environment(self): super(_Base, self).setup_environment() self.nodemanager = node_manager.NodeManager(self.conf) self.polling_failures = 0 # Do not load this extension if no NM support if self.nodemanager.nm_version == 0: raise plugin_base.ExtensionLoadError() @property def default_discovery(self): return 'local_node' def get_value(self, stats): """Get value from statistics.""" return node_manager._hex(stats["Current_value"]) @abc.abstractmethod def read_data(self, cache): """Return data sample for IPMI.""" def get_samples(self, manager, cache, resources): # Only one resource for Node Manager pollster try: stats = self.read_data(cache) except nmexcept.IPMIException: self.polling_failures += 1 LOG.warning(_('Polling %(name)s failed for %(cnt)s times!') % ({'name': self.NAME, 'cnt': self.polling_failures})) if 0 <= self.conf.ipmi.polling_retry < self.polling_failures: LOG.warning(_('Pollster for %s is disabled!') % self.NAME) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 metadata = { 'node': self.conf.host } if stats: data = self.get_value(stats) yield sample.Sample( name=self.NAME, type=self.TYPE, unit=self.UNIT, volume=data, user_id=None, project_id=None, resource_id=self.conf.host, resource_metadata=metadata) class InletTemperaturePollster(_Base): # Note(ildikov): The new meter name should be # "hardware.ipmi.node.inlet_temperature". As currently there # is no meter deprecation support in the code, we should use the # old name in order to avoid confusion. 
NAME = "hardware.ipmi.node.temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_inlet_temperature() class OutletTemperaturePollster(_Base): NAME = "hardware.ipmi.node.outlet_temperature" TYPE = sample.TYPE_GAUGE UNIT = "C" def read_data(self, cache): return self.nodemanager.read_outlet_temperature() class PowerPollster(_Base): NAME = "hardware.ipmi.node.power" TYPE = sample.TYPE_GAUGE UNIT = "W" def read_data(self, cache): return self.nodemanager.read_power_all() class AirflowPollster(_Base): NAME = "hardware.ipmi.node.airflow" TYPE = sample.TYPE_GAUGE UNIT = "CFM" def read_data(self, cache): return self.nodemanager.read_airflow() class CUPSIndexPollster(_Base): NAME = "hardware.ipmi.node.cups" TYPE = sample.TYPE_GAUGE UNIT = "CUPS" def read_data(self, cache): return self.nodemanager.read_cups_index() def get_value(self, stats): return node_manager._hex(stats["CUPS_Index"]) class _CUPSUtilPollsterBase(_Base): CACHE_KEY_CUPS = 'CUPS' def read_data(self, cache): i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {}) if not i_cache: i_cache.update(self.nodemanager.read_cups_utilization()) return i_cache class CPUUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.cpu_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["CPU_Utilization"]) class MemUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.mem_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["Mem_Utilization"]) class IOUtilPollster(_CUPSUtilPollsterBase): NAME = "hardware.ipmi.node.io_util" TYPE = sample.TYPE_GAUGE UNIT = "%" def get_value(self, stats): return node_manager._hex(stats["IO_Utilization"]) ceilometer-10.0.0/ceilometer/ipmi/pollsters/__init__.py0000666000175100017510000000167413236733243023212 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Pollsters for IPMI and Intel Node Manager """ from oslo_config import cfg OPTS = [ cfg.IntOpt('polling_retry', default=3, help='Tolerance of IPMI/NM polling failures ' 'before disable this pollster. ' 'Negative indicates retrying forever.') ] ceilometer-10.0.0/ceilometer/ipmi/__init__.py0000666000175100017510000000000013236733243021141 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/platform/0000775000175100017510000000000013236733440020663 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/platform/ipmitool.py0000666000175100017510000001061213236733243023074 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils to run ipmitool for data collection""" from oslo_concurrency import processutils from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer import utils # Following 2 functions are copied from ironic project to handle ipmitool's # sensor data output. Need code clean and sharing in future. # Check ironic/drivers/modules/ipmitool.py def _get_sensor_type(sensor_data_dict): # Have only three sensor type name IDs: 'Sensor Type (Analog)' # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)' for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)', 'Sensor Type (Threshold)'): try: return sensor_data_dict[key].split(' ', 1)[0] except KeyError: continue raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "unknown sensor type")) def _process_sensor(sensor_data): sensor_data_fields = sensor_data.split('\n') sensor_data_dict = {} for field in sensor_data_fields: if not field: continue kv_value = field.split(':') if len(kv_value) != 2: continue sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip() return sensor_data_dict def _translate_output(output): """Translate the return value into JSON dict :param output: output of the execution of IPMI command(sensor reading) """ sensors_data_dict = {} sensors_data_array = output.split('\n\n') for sensor_data in sensors_data_array: sensor_data_dict = _process_sensor(sensor_data) if not sensor_data_dict: continue sensor_type = _get_sensor_type(sensor_data_dict) # ignore the sensors which have no current 'Sensor Reading' data sensor_id = sensor_data_dict['Sensor ID'] if 'Sensor Reading' in sensor_data_dict: sensors_data_dict.setdefault(sensor_type, {})[sensor_id] = sensor_data_dict # get nothing, no valid sensor data if not sensors_data_dict: raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "No data retrieved from given input")) return sensors_data_dict def _parse_output(output, template): """Parse the return value of IPMI command into dict :param output: output of the execution of IPMI command :param template: a dict that contains the expected items of IPMI command and its length. """ ret = {} index = 0 if not (output and template): return ret if "translate" in template: ret = _translate_output(output) else: output_list = output.strip().replace('\n', '').split(' ') if sum(template.values()) != len(output_list): raise ipmiexcept.IPMIException(_("ipmitool output " "length mismatch")) for item in template.items(): index_end = index + item[1] update_value = output_list[index: index_end] ret[item[0]] = update_value index = index_end return ret def execute_ipmi_cmd(template=None): """Decorator for the execution of IPMI command. It parses the output of IPMI command into dictionary. 
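    A minimal sketch of how callers in this package use it (these are the
    real decorated methods from ipmi_sensor.py; the wrapper prepends
    'ipmitool' to the returned sub-command, runs it as root and parses the
    output with _parse_output()/_translate_output()):

        @execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
        def _read_sensor_temperature(self):
            return IPMICMD['sensor_dump_temperature']  # "sdr -v type Temperature"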
""" template = template or [] def _execute_ipmi_cmd(f): def _execute(self, **kwargs): args = ['ipmitool'] command = f(self, **kwargs) args.extend(command.split(" ")) try: (out, __) = utils.execute(*args, run_as_root=True) except processutils.ProcessExecutionError: raise ipmiexcept.IPMIException(_("running ipmitool failure")) return _parse_output(out, template) return _execute return _execute_ipmi_cmd ceilometer-10.0.0/ceilometer/ipmi/platform/__init__.py0000666000175100017510000000000013236733243022765 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/ipmi/platform/ipmi_sensor.py0000666000175100017510000000765613236733243023605 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """IPMI sensor to collect various sensor data of compute node""" from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmitool IPMICMD = {"sdr_dump": "sdr dump", "sdr_info": "sdr info", "sensor_dump": "sdr -v", "sensor_dump_temperature": "sdr -v type Temperature", "sensor_dump_current": "sdr -v type Current", "sensor_dump_fan": "sdr -v type Fan", "sensor_dump_voltage": "sdr -v type Voltage"} # Requires translation of output into dict DICT_TRANSLATE_TEMPLATE = {"translate": 1} class IPMISensor(object): """The python implementation of IPMI sensor using ipmitool The class implements the IPMI sensor to get various sensor data of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. 
""" _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super(IPMISensor, cls).__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): self.ipmi_support = False self._inited = True self.ipmi_support = self.check_ipmi() @ipmitool.execute_ipmi_cmd() def _get_sdr_info(self): """Get the SDR info.""" return IPMICMD['sdr_info'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_all(self): """Get the sensor data for type.""" return IPMICMD['sensor_dump'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_temperature(self): """Get the sensor data for Temperature.""" return IPMICMD['sensor_dump_temperature'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_voltage(self): """Get the sensor data for Voltage.""" return IPMICMD['sensor_dump_voltage'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_current(self): """Get the sensor data for Current.""" return IPMICMD['sensor_dump_current'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_fan(self): """Get the sensor data for Fan.""" return IPMICMD['sensor_dump_fan'] def read_sensor_any(self, sensor_type=''): """Get the sensor data for type.""" if not self.ipmi_support: return {} mapping = {'': self._read_sensor_all, 'Temperature': self._read_sensor_temperature, 'Fan': self._read_sensor_fan, 'Voltage': self._read_sensor_voltage, 'Current': self._read_sensor_current} try: return mapping[sensor_type]() except KeyError: raise ipmiexcept.IPMIException(_('Wrong sensor type')) def check_ipmi(self): """IPMI capability checking This function is used to detect if compute node is IPMI capable platform. Just run a simple IPMI command to get SDR info for check. """ try: self._get_sdr_info() except ipmiexcept.IPMIException: return False return True ceilometer-10.0.0/ceilometer/ipmi/platform/intel_node_manager.py0000666000175100017510000003161213236733243025055 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Node manager engine to collect power and temperature of compute node. Intel Node Manager Technology enables the datacenter IT to monitor and control actual server power, thermal and compute utilization behavior through industry defined standard IPMI. This file provides Node Manager engine to get simple system power and temperature data based on ipmitool. 
""" import binascii import collections import tempfile import threading import time from oslo_config import cfg import six from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import ipmitool OPTS = [ cfg.IntOpt('node_manager_init_retry', default=3, help='Number of retries upon Intel Node ' 'Manager initialization failure') ] IPMICMD = {"sdr_dump": "sdr dump", "sdr_info": "sdr info", "sensor_dump": "sdr -v"} IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01", "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00", "init_sensor_agent": "raw 0x0a 0x2c 0x01", "init_complete": "raw 0x0a 0x2c 0x00", "init_sensor_agent_status": "raw 0x0a 0x2c 0x00", "read_power_all": "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00", "read_inlet_temperature": "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00", "read_outlet_temperature": "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00", "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00", "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05", "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"} MANUFACTURER_ID_INTEL = ['57', '01', '00'] INTEL_PREFIX = '5701000d01' # The template dict are made according to the spec. It contains the expected # length of each item. And it can be used to parse the output of IPMI command. ONE_RETURN_TEMPLATE = {"ret": 1} BMC_INFO_TEMPLATE = collections.OrderedDict() BMC_INFO_TEMPLATE['Device_ID'] = 1 BMC_INFO_TEMPLATE['Device_Revision'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1 BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1 BMC_INFO_TEMPLATE['IPMI_Version'] = 1 BMC_INFO_TEMPLATE['Additional_Device_support'] = 1 BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3 BMC_INFO_TEMPLATE['Product_ID'] = 2 BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4 NM_STATISTICS_TEMPLATE = collections.OrderedDict() NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3 NM_STATISTICS_TEMPLATE['Current_value'] = 2 NM_STATISTICS_TEMPLATE['Minimum_value'] = 2 NM_STATISTICS_TEMPLATE['Maximum_value'] = 2 NM_STATISTICS_TEMPLATE['Average_value'] = 2 NM_STATISTICS_TEMPLATE['Time_stamp'] = 4 NM_STATISTICS_TEMPLATE['Report_period'] = 4 NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1 NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict() NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1 NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Additional_Device_support'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1 NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1 NM_GET_VERSION_TEMPLATE = collections.OrderedDict() NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3 NM_GET_VERSION_TEMPLATE['NM_Version'] = 1 NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1 NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1 NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1 NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict() NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8 NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8 
NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict() NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3 NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2 def _hex(list=None): """Format the return value in list into hex.""" list = list or [] if list: list.reverse() return int(''.join(list), 16) return 0 class NodeManager(object): """The python implementation of Intel Node Manager engine using ipmitool The class implements the engine to read power and temperature of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. """ _instance = None _instance_lock = threading.Lock() def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if cls._instance: # Shortcut with no lock return cls._instance with cls._instance_lock: if not cls._instance: cls._instance = super(NodeManager, cls).__new__( cls, *args, **kwargs) return cls._instance def __init__(self, conf): self.conf = conf self.nm_version = 0 self.channel_slave = '' self.nm_version = self.check_node_manager() @staticmethod def _parse_slave_and_channel(file_path): """Parse the dumped file to get slave address and channel number. :param file_path: file path of dumped SDR file. :return: slave address and channel number of target device or None if not found. """ prefix = INTEL_PREFIX # According to Intel Node Manager spec, section 4.5, for Intel NM # discovery OEM SDR records are type C0h. It contains manufacture ID # and OEM data in the record body. # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6 # is Intel NM device slave address and channel number/sensor owner LUN. with open(file_path, 'rb') as bin_fp: data_str = binascii.hexlify(bin_fp.read()) if six.PY3: data_str = data_str.decode('ascii') oem_id_index = data_str.find(prefix) if oem_id_index != -1: ret = data_str[oem_id_index + len(prefix): oem_id_index + len(prefix) + 4] # Byte 5 is slave address. [7:4] from byte 6 is channel # number, so just pick ret[2] here. return (ret[0:2], ret[2]) @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE) def get_device_id(self): """IPMI command GET_DEVICE_ID.""" return IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent(self): """Run initialization agent.""" return IPMIRAWCMD["init_sensor_agent"] @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) def _init_sensor_agent_process(self): """Check the status of initialization agent.""" return IPMIRAWCMD["init_sensor_agent_status"] @ipmitool.execute_ipmi_cmd() def _dump_sdr_file(self, data_file=""): """Dump SDR into a file.""" return IPMICMD["sdr_dump"] + " " + data_file @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE) def _node_manager_get_device_id(self): """GET_DEVICE_ID command in Intel Node Manager Different from IPMI command GET_DEVICE_ID, it contains more information of Intel Node Manager. 
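        The reply is decoded with NM_GET_DEVICE_ID_TEMPLATE; bits 3..0 of
        'Implemented_firmware' are what node_manager_version() checks to
        decide whether Intel NM is actually implemented.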
""" return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"] @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE) def _node_manager_get_version(self): """GET_NODE_MANAGER_VERSION command in Intel Node Manager Byte 4 of the response: 01h - Intel NM 1.0 02h - Intel NM 1.5 03h - Intel NM 2.0 04h - Intel NM 2.5 05h - Intel NM 3.0 """ return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_power_all(self): """Get the power consumption of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_inlet_temperature(self): """Get the inlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_outlet_temperature(self): """Get the outlet temperature info of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature'] @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) def _read_airflow(self): """Get the volumetric airflow of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow'] @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE) def _read_cups_utilization(self): """Get the average CUPS utilization of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization'] @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE) def _read_cups_index(self): """Get the CUPS Index of the whole platform.""" return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index'] def read_power_all(self): return self._read_power_all() if self.nm_version > 0 else {} def read_inlet_temperature(self): return self._read_inlet_temperature() if self.nm_version > 0 else {} def read_outlet_temperature(self): return self._read_outlet_temperature() if self.nm_version >= 5 else {} def read_airflow(self): # only available after NM 3.0 return self._read_airflow() if self.nm_version >= 5 else {} def read_cups_utilization(self): # only available after NM 3.0 return self._read_cups_utilization() if self.nm_version >= 5 else {} def read_cups_index(self): # only available after NM 3.0 return self._read_cups_index() if self.nm_version >= 5 else {} def init_node_manager(self): if self._init_sensor_agent_process()['ret'] == ['01']: return # Run sensor initialization agent for i in range(self.conf.ipmi.node_manager_init_retry): self._init_sensor_agent() time.sleep(1) if self._init_sensor_agent_process()['ret'] == ['01']: return raise nmexcept.NodeManagerException(_('Node Manager init failed')) def discover_slave_channel(self): """Discover target slave address and channel number.""" file_path = tempfile.mkstemp()[1] self._dump_sdr_file(data_file=file_path) ret = self._parse_slave_and_channel(file_path) slave_address = ''.join(['0x', ret[0]]) channel = ''.join(['0x', ret[1]]) # String of channel and slave_address self.channel_slave = '-b ' + channel + ' -t ' + slave_address def node_manager_version(self): """Intel Node Manager capability checking This function is used to detect if compute node support Intel Node Manager(return version number) or not(return -1) and parse out the slave address and channel number of node manager. """ self.manufacturer_id = self.get_device_id()['Manufacturer_ID'] if MANUFACTURER_ID_INTEL != self.manufacturer_id: # If the manufacturer is not Intel, just set False and return. 
return 0 self.discover_slave_channel() support = self._node_manager_get_device_id()['Implemented_firmware'] # According to Intel Node Manager spec, return value of GET_DEVICE_ID, # bits 3 to 0 shows if Intel NM implemented or not. if int(support[0], 16) & 0xf == 0: return 0 return _hex(self._node_manager_get_version()['NM_Version']) def check_node_manager(self): """Intel Node Manager init and check This function is used to initialize Intel Node Manager and check the capability without throwing exception. It's safe to call it on non-NodeManager platform. """ try: self.init_node_manager() nm_version = self.node_manager_version() except (nmexcept.NodeManagerException, nmexcept.IPMIException): return 0 return nm_version ceilometer-10.0.0/ceilometer/ipmi/platform/exception.py0000666000175100017510000000132613236733243023240 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NodeManagerException(Exception): pass class IPMIException(Exception): pass ceilometer-10.0.0/ceilometer/data/0000775000175100017510000000000013236733440017012 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/data/meters.d/0000775000175100017510000000000013236733440020533 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/data/meters.d/meters.yaml0000666000175100017510000003164113236733243022726 0ustar zuulzuul00000000000000--- metric: # Image - name: "image.size" event_type: - "image.upload" - "image.delete" - "image.update" type: "gauge" unit: B volume: $.payload.size resource_id: $.payload.id project_id: $.payload.owner - name: "image.download" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id user_id: $.payload.receiver_user_id project_id: $.payload.receiver_tenant_id - name: "image.serve" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id project_id: $.payload.owner_id - name: 'volume.provider.capacity.total' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.total resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.free' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.free resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.allocated' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.allocated resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.provisioned' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.provisioned resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.virtual_free' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.virtual_free resource_id: $.payload.name_to_id - name: 'volume.provider.pool.capacity.total' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.total resource_id: $.payload.name_to_id metadata: &provider_pool_meta provider: $.payload.name_to_id.`split(#, 0, 1)` - name: 
'volume.provider.pool.capacity.free' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.free resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.allocated' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.allocated resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.provisioned' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.provisioned resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.virtual_free' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.virtual_free resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.size' event_type: - 'volume.exists' - 'volume.create.*' - 'volume.delete.*' - 'volume.resize.*' - 'volume.attach.*' - 'volume.detach.*' - 'volume.update.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.volume_id metadata: display_name: $.payload.display_name volume_type: $.payload.volume_type - name: 'snapshot.size' event_type: - 'snapshot.exists' - 'snapshot.create.*' - 'snapshot.delete.*' type: 'gauge' unit: 'GB' volume: $.payload.volume_size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.snapshot_id metadata: display_name: $.payload.display_name - name: 'backup.size' event_type: - 'backup.exists' - 'backup.create.*' - 'backup.delete.*' - 'backup.restore.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.backup_id metadata: display_name: $.payload.display_name # Magnum - name: $.payload.metrics.[*].name event_type: 'magnum.bay.metrics.*' type: 'gauge' unit: $.payload.metrics.[*].unit volume: $.payload.metrics.[*].value user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.resource_id lookup: ['name', 'unit', 'volume'] # Swift - name: $.payload.measurements.[*].metric.[*].name event_type: 'objectstore.http.request' type: 'delta' unit: $.payload.measurements.[*].metric.[*].unit volume: $.payload.measurements.[*].result resource_id: $.payload.target.id user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id lookup: ['name', 'unit', 'volume'] - name: 'memory' event_type: &instance_events compute.instance.(?!create.start).* type: 'gauge' unit: 'MB' volume: $.payload.memory_mb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: &instance_meta host: $.payload.host flavor_id: $.payload.instance_flavor_id flavor_name: $.payload.instance_type display_name: $.payload.display_name image_ref: $.payload.image_meta.base_image_ref - name: 'vcpus' event_type: *instance_events type: 'gauge' unit: 'vcpu' volume: $.payload.vcpus user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'compute.instance.booting.time' event_type: 'compute.instance.create.end' type: 'gauge' unit: 'sec' volume: fields: [$.payload.created_at, $.payload.launched_at] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'disk.root.size' event_type: *instance_events type: 'gauge' unit: 'GB' volume: 
$.payload.root_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'disk.ephemeral.size' event_type: *instance_events type: 'gauge' unit: 'GB' volume: $.payload.ephemeral_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'bandwidth' event_type: 'l3.meter' type: 'delta' unit: 'B' volume: $.payload.bytes project_id: $.payload.tenant_id resource_id: $.payload.label_id - name: 'compute.node.cpu.frequency' event_type: 'compute.metrics.update' type: 'gauge' unit: 'MHz' volume: $.payload.metrics[?(@.name='cpu.frequency')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.frequency')].source - name: 'compute.node.cpu.user.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.user.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.time')].source - name: 'compute.node.cpu.kernel.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - name: 'compute.node.cpu.idle.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.idle.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.time')].source - name: 'compute.node.cpu.iowait.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - name: 'compute.node.cpu.kernel.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - name: 'compute.node.cpu.idle.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.percent')].source - name: 'compute.node.cpu.user.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 
'percent' volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.percent')].source - name: 'compute.node.cpu.iowait.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - name: 'compute.node.cpu.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.percent')].source # Identity # NOTE(gordc): hack because jsonpath-rw-ext can't concat starting with string. - name: $.payload.outcome - $.payload.outcome + 'identity.authenticate.' + $.payload.outcome type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.authenticate' resource_id: $.payload.initiator.id user_id: $.payload.initiator.id # DNS - name: 'dns.domain.exists' event_type: 'dns.domain.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.id user_id: $.ctxt.user metadata: status: $.payload.status pool_id: $.payload.pool_id host: $.publisher_id # Trove - name: 'trove.instance.exists' event_type: 'trove.instance.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_id: $.payload.user_id metadata: nova_instance_id: $.payload.nova_instance_id state: $.payload.state service_id: $.payload.service_id instance_type: $.payload.instance_type instance_type_id: $.payload.instance_type_id # Manila - name: 'manila.share.size' event_type: - 'share.create.*' - 'share.delete.*' - 'share.extend.*' - 'share.shrink.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.share_id metadata: name: $.payload.name host: $.payload.host status: $.payload.status availability_zone: $.payload.availability_zone protocol: $.payload.proto ceilometer-10.0.0/ceilometer/hacking/0000775000175100017510000000000013236733440017505 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hacking/__init__.py0000666000175100017510000000000013236733243021607 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hacking/checks.py0000666000175100017510000000330413236733243021322 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Ceilometer specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range X3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file """ def no_log_warn(logical_line): """Disallow 'LOG.warn(' https://bugs.launchpad.net/tempest/+bug/1508442 C301 """ if logical_line.startswith('LOG.warn('): yield(0, 'C301 Use LOG.warning() rather than LOG.warn()') def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 C302 """ if 'os.popen(' in logical_line: yield(0, 'C302 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') def factory(register): register(no_log_warn) register(no_os_popen) ceilometer-10.0.0/ceilometer/service.py0000666000175100017510000000366713236733243020132 0ustar zuulzuul00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
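# prepare_service() below is the common bootstrap for ceilometer's console
# scripts (for example, ceilometer/cmd/sample.py calls
# service.prepare_service(conf=conf) after registering its own CLI options).
# It registers every option group plus keystoneauth and logging options,
# parses argv and the config files, then sets up logging, the root helper,
# sample defaults, Guru Meditation reports and oslo.messaging.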
import sys from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from ceilometer import keystone_client from ceilometer import messaging from ceilometer import opts from ceilometer import sample from ceilometer import utils from ceilometer import version def prepare_service(argv=None, config_files=None, conf=None): if argv is None: argv = sys.argv if conf is None: conf = cfg.ConfigOpts() oslo_i18n.enable_lazy() for group, options in opts.list_opts(): conf.register_opts(list(options), group=None if group == "DEFAULT" else group) keystone_client.register_keystoneauth_opts(conf) log.register_options(conf) log_levels = (conf.default_log_levels + ['futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO']) log.set_defaults(default_log_levels=log_levels) conf(argv[1:], project='ceilometer', validate_default_values=True, version=version.version_info.version_string(), default_config_files=config_files) keystone_client.post_register_keystoneauth_opts(conf) log.setup(conf, 'ceilometer') utils.setup_root_helper(conf) sample.setup(conf) gmr.TextGuruMeditation.setup_autorun(version) messaging.setup() return conf ceilometer-10.0.0/ceilometer/cmd/0000775000175100017510000000000013236733440016644 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/cmd/sample.py0000666000175100017510000000606013236733243020504 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # # Copyright 2012-2014 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating meter for Ceilometer. 
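
An illustrative invocation (the option names come from send_sample() below;
the console-script name and the example values are assumptions)::

    ceilometer-send-sample --sample-name image.size --sample-resource <resource-uuid> --sample-volume 1024 --sample-unit B

Unspecified options fall back to their defaults (for example, sample-type
defaults to 'gauge').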
""" import logging import sys from oslo_config import cfg from oslo_utils import timeutils from ceilometer.pipeline import sample as sample_pipe from ceilometer import sample from ceilometer import service def send_sample(): conf = cfg.ConfigOpts() conf.register_cli_opts([ cfg.StrOpt('sample-name', short='n', help='Meter name.', required=True), cfg.StrOpt('sample-type', short='y', help='Meter type (gauge, delta, cumulative).', default='gauge', required=True), cfg.StrOpt('sample-unit', short='U', help='Meter unit.'), cfg.IntOpt('sample-volume', short='l', help='Meter volume value.', default=1), cfg.StrOpt('sample-resource', short='r', help='Meter resource id.', required=True), cfg.StrOpt('sample-user', short='u', help='Meter user id.'), cfg.StrOpt('sample-project', short='p', help='Meter project id.'), cfg.StrOpt('sample-timestamp', short='i', help='Meter timestamp.', default=timeutils.utcnow().isoformat()), cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'), ]) service.prepare_service(conf=conf) # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) pipeline_manager = sample_pipe.SamplePipelineManager(conf) with pipeline_manager.publisher() as p: p([sample.Sample( name=conf.sample_name, type=conf.sample_type, unit=conf.sample_unit, volume=conf.sample_volume, user_id=conf.sample_user, project_id=conf.sample_project, resource_id=conf.sample_resource, timestamp=conf.sample_timestamp, resource_metadata=conf.sample_metadata and eval( conf.sample_metadata))]) ceilometer-10.0.0/ceilometer/cmd/__init__.py0000666000175100017510000000000013236733243020746 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/cmd/polling.py0000666000175100017510000000561313236733243020672 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import cotyledon from cotyledon import oslo_config_glue from oslo_config import cfg from oslo_log import log from ceilometer.polling import manager from ceilometer import service LOG = log.getLogger(__name__) class MultiChoicesOpt(cfg.Opt): def __init__(self, name, choices=None, **kwargs): super(MultiChoicesOpt, self).__init__( name, type=DeduplicatedCfgList(choices), **kwargs) self.choices = choices def _get_argparse_kwargs(self, group, **kwargs): """Extends the base argparse keyword dict for multi choices options.""" kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group) kwargs['nargs'] = '+' choices = kwargs.get('choices', self.choices) if choices: kwargs['choices'] = choices return kwargs class DeduplicatedCfgList(cfg.types.List): def __init__(self, choices=None, **kwargs): super(DeduplicatedCfgList, self).__init__(**kwargs) self.choices = choices or [] def __call__(self, *args, **kwargs): result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs) result_set = set(result) if len(result) != len(result_set): LOG.warning("Duplicated values: %s found in CLI options, " "auto de-duplicated", result) result = list(result_set) if self.choices and not (result_set <= set(self.choices)): raise Exception('Valid values are %s, but found %s' % (self.choices, result)) return result CLI_OPTS = [ MultiChoicesOpt('polling-namespaces', default=['compute', 'central'], choices=['compute', 'central', 'ipmi'], dest='polling_namespaces', help='Polling namespace(s) to be used while ' 'resource polling'), ] def create_polling_service(worker_id, conf): return manager.AgentManager(worker_id, conf, conf.polling_namespaces) def main(): conf = cfg.ConfigOpts() conf.register_cli_opts(CLI_OPTS) service.prepare_service(conf=conf) sm = cotyledon.ServiceManager() sm.add(create_polling_service, args=(conf,)) oslo_config_glue.setup(sm, conf) sm.run() ceilometer-10.0.0/ceilometer/cmd/storage.py0000666000175100017510000000362513236733243020673 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import tenacity from ceilometer import service LOG = log.getLogger(__name__) def upgrade(): conf = cfg.ConfigOpts() conf.register_cli_opts([ cfg.BoolOpt('skip-gnocchi-resource-types', help='Skip gnocchi resource-types upgrade.', default=False), cfg.IntOpt('retry', min=0, help='Number of times to retry on failure. 
' 'Default is to retry forever.'), ]) service.prepare_service(conf=conf) if conf.skip_gnocchi_resource_types: LOG.info("Skipping Gnocchi resource types upgrade") else: LOG.debug("Upgrading Gnocchi resource types") from ceilometer import gnocchi_client from gnocchiclient import exceptions if conf.retry is None: stop = tenacity.stop_never else: stop = tenacity.stop_after_attempt(conf.retry) tenacity.Retrying( stop=stop, retry=tenacity.retry_if_exception_type(( exceptions.ConnectionFailure, exceptions.UnknownConnectionError, exceptions.ConnectionTimeout, exceptions.SSLError, )) )(gnocchi_client.upgrade_resource_types, conf) ceilometer-10.0.0/ceilometer/cmd/agent_notification.py0000666000175100017510000000172513236733243023072 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cotyledon from cotyledon import oslo_config_glue from ceilometer import notification from ceilometer import service def main(): conf = service.prepare_service() sm = cotyledon.ServiceManager() sm.add(notification.NotificationService, workers=conf.notification.workers, args=(conf,)) oslo_config_glue.setup(sm, conf) sm.run() ceilometer-10.0.0/ceilometer/meter/0000775000175100017510000000000013236733440017215 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/meter/notifications.py0000666000175100017510000002235513236733243022452 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import itertools import os import re import pkg_resources import six from oslo_config import cfg from oslo_log import log from stevedore import extension from ceilometer import declarative from ceilometer.i18n import _ from ceilometer.pipeline import sample as endpoint from ceilometer import sample as sample_util OPTS = [ cfg.StrOpt('meter_definitions_cfg_file', deprecated_for_removal=True, help="Configuration file for defining meter " "notifications. This option is deprecated " "and use meter_definitions_dirs to " "configure meter notification file. Meter " "definitions configuration file will be sought " "according to the parameter." ), cfg.MultiStrOpt('meter_definitions_dirs', default=["/etc/ceilometer/meters.d", os.path.abspath( os.path.join( os.path.split( os.path.dirname(__file__))[0], "data", "meters.d"))], help="List directory to find files of " "defining meter notifications." 
), ] LOG = log.getLogger(__name__) class MeterDefinition(object): SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp", "user_id", "project_id", "resource_id"] REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume', 'resource_id'] def __init__(self, definition_cfg, conf, plugin_manager): self.conf = conf self.cfg = definition_cfg missing = [field for field in self.REQUIRED_FIELDS if not self.cfg.get(field)] if missing: raise declarative.MeterDefinitionException( _("Required fields %s not specified") % missing, self.cfg) self._event_type = self.cfg.get('event_type') if isinstance(self._event_type, six.string_types): self._event_type = [self._event_type] self._event_type = [re.compile(etype) for etype in self._event_type] if ('type' not in self.cfg.get('lookup', []) and self.cfg['type'] not in sample_util.TYPES): raise declarative.MeterDefinitionException( _("Invalid type %s specified") % self.cfg['type'], self.cfg) self._fallback_user_id = declarative.Definition( 'user_id', "ctxt.user_id|ctxt.user", plugin_manager) self._fallback_project_id = declarative.Definition( 'project_id', "ctxt.tenant_id|ctxt.tenant", plugin_manager) self._attributes = {} self._metadata_attributes = {} self._user_meta = None for name in self.SAMPLE_ATTRIBUTES: attr_cfg = self.cfg.get(name) if attr_cfg: self._attributes[name] = declarative.Definition( name, attr_cfg, plugin_manager) metadata = self.cfg.get('metadata', {}) for name in metadata: self._metadata_attributes[name] = declarative.Definition( name, metadata[name], plugin_manager) user_meta = self.cfg.get('user_metadata') if user_meta: self._user_meta = declarative.Definition(None, user_meta, plugin_manager) # List of fields we expected when multiple meter are in the payload self.lookup = self.cfg.get('lookup') if isinstance(self.lookup, six.string_types): self.lookup = [self.lookup] def match_type(self, meter_name): for t in self._event_type: if t.match(meter_name): return True def to_samples(self, message, all_values=False): # Sample defaults sample = { 'name': self.cfg["name"], 'type': self.cfg["type"], 'unit': self.cfg["unit"], 'volume': None, 'timestamp': None, 'user_id': self._fallback_user_id.parse(message), 'project_id': self._fallback_project_id.parse(message), 'resource_id': None, 'message': message, 'metadata': {}, } for name, parser in self._metadata_attributes.items(): value = parser.parse(message) if value: sample['metadata'][name] = value if self._user_meta: meta = self._user_meta.parse(message) if meta: sample_util.add_reserved_user_metadata( self.conf, meta, sample['metadata']) # NOTE(sileht): We expect multiple samples in the payload # so put each attribute into a list if self.lookup: for name in sample: sample[name] = [sample[name]] for name in self.SAMPLE_ATTRIBUTES: parser = self._attributes.get(name) if parser is not None: value = parser.parse(message, bool(self.lookup)) # NOTE(sileht): If we expect multiple samples # some attributes are overridden even we don't get any # result. 
Also note in this case value is always a list if ((not self.lookup and value is not None) or (self.lookup and ((name in self.lookup + ["name"]) or value))): sample[name] = value if self.lookup: nb_samples = len(sample['name']) # skip if no meters in payload if nb_samples <= 0: raise StopIteration attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"] samples_values = [] for name in attributes: values = sample.get(name) nb_values = len(values) if nb_values == nb_samples: samples_values.append(values) elif nb_values == 1 and name not in self.lookup: samples_values.append(itertools.cycle(values)) else: nb = (0 if nb_values == 1 and values[0] is None else nb_values) LOG.warning('Only %(nb)d fetched meters contain ' '"%(name)s" field instead of %(total)d.' % dict(name=name, nb=nb, total=nb_samples)) raise StopIteration # NOTE(sileht): Transform the sample with multiple values per # attribute into multiple samples with one value per attribute. for values in zip(*samples_values): yield dict((attributes[idx], value) for idx, value in enumerate(values)) else: yield sample class ProcessMeterNotifications(endpoint.SampleEndpoint): event_types = [] def __init__(self, conf, publisher): super(ProcessMeterNotifications, self).__init__(conf, publisher) self.definitions = self._load_definitions() def _load_definitions(self): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') definitions = {} mfs = [] for dir in self.conf.meter.meter_definitions_dirs: for filepath in sorted(glob.glob(os.path.join(dir, "*.yaml"))): if filepath is not None: mfs.append(filepath) if self.conf.meter.meter_definitions_cfg_file is not None: mfs.append( pkg_resources.resource_filename( self.conf.meter.meter_definitions_cfg_file) ) for mf in mfs: meters_cfg = declarative.load_definitions( self.conf, {}, mf) for meter_cfg in reversed(meters_cfg['metric']): if meter_cfg.get('name') in definitions: # skip duplicate meters LOG.warning("Skipping duplicate meter definition %s" % meter_cfg) continue try: md = MeterDefinition(meter_cfg, self.conf, plugin_manager) except declarative.DefinitionException as e: errmsg = "Error loading meter definition: %s" LOG.error(errmsg, six.text_type(e)) else: definitions[meter_cfg['name']] = md return definitions.values() def build_sample(self, notification): for d in self.definitions: if d.match_type(notification['event_type']): for s in d.to_samples(notification): yield sample_util.Sample.from_notification(**s) ceilometer-10.0.0/ceilometer/meter/__init__.py0000666000175100017510000000000013236733243021317 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/nova_client.py0000666000175100017510000001153113236733243020760 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
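# NOTE: Illustrative sketch only, not part of upstream Ceilometer. It shows
# how a compute pollster might use the Client wrapper defined below to list
# the instances running on the local hypervisor; 'conf' is assumed to be a
# prepared oslo.config ConfigOpts (see ceilometer.service.prepare_service).
def _example_list_local_instances(conf):
    import socket
    client = Client(conf)
    # An ISO-formatted 'since' value ('%Y-%m-%dT%H:%M:%SZ') can be passed to
    # restrict the result to instances changed after that point in time.
    return client.instance_get_all_by_host(socket.gethostname())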
import functools import glanceclient import novaclient from novaclient import api_versions from novaclient import client as nova_client from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client SERVICE_OPTS = [ cfg.StrOpt('nova', default='compute', help='Nova service type.'), ] LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: LOG.exception(e) raise return with_logging class Client(object): """A client which gets information via python-novaclient.""" def __init__(self, conf): """Initialize a nova client object.""" creds = conf.service_credentials ks_session = keystone_client.get_session(conf) self.nova_client = nova_client.Client( version=api_versions.APIVersion('2.1'), session=ks_session, # nova adapter options region_name=creds.region_name, endpoint_type=creds.interface, service_type=conf.service_types.nova) self.glance_client = glanceclient.Client( version='2', session=ks_session, region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.glance) def _with_flavor_and_image(self, instances): flavor_cache = {} image_cache = {} for instance in instances: self._with_flavor(instance, flavor_cache) self._with_image(instance, image_cache) return instances def _with_flavor(self, instance, cache): fid = instance.flavor['id'] if fid in cache: flavor = cache.get(fid) else: try: flavor = self.nova_client.flavors.get(fid) except novaclient.exceptions.NotFound: flavor = None cache[fid] = flavor attr_defaults = [('name', 'unknown-id-%s' % fid), ('vcpus', 0), ('ram', 0), ('disk', 0), ('ephemeral', 0)] for attr, default in attr_defaults: if not flavor: instance.flavor[attr] = default continue instance.flavor[attr] = getattr(flavor, attr, default) def _with_image(self, instance, cache): try: iid = instance.image['id'] except TypeError: instance.image = None instance.kernel_id = None instance.ramdisk_id = None return if iid in cache: image = cache.get(iid) else: try: image = self.glance_client.images.get(iid) except glanceclient.exc.HTTPNotFound: image = None cache[iid] = image attr_defaults = [('kernel_id', None), ('ramdisk_id', None)] instance.image['name'] = ( getattr(image, 'name') if image else 'unknown-id-%s' % iid) image_metadata = getattr(image, 'metadata', None) for attr, default in attr_defaults: ameta = image_metadata.get(attr) if image_metadata else default setattr(instance, attr, ameta) @logged def instance_get_all_by_host(self, hostname, since=None): """Returns list of instances on particular host. If since is supplied, it will return the instances changed since that datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'host': hostname, 'all_tenants': True} if since: search_opts['changes-since'] = since return self._with_flavor_and_image(self.nova_client.servers.list( detailed=True, search_opts=search_opts)) @logged def instance_get_all(self, since=None): """Returns list of all instances. If since is supplied, it will return the instances changes since that datetime. 
since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'all_tenants': True} if since: search_opts['changes-since'] = since return self.nova_client.servers.list( detailed=True, search_opts=search_opts) ceilometer-10.0.0/ceilometer/event/0000775000175100017510000000000013236733440017222 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/event/trait_plugins.py0000666000175100017510000002052613236733243022470 0ustar zuulzuul00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from debtcollector import moves from oslo_log import log from oslo_utils import timeutils import six LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class TraitPluginBase(object): """Base class for plugins. It converts notification fields to Trait values. """ support_return_all_values = False """If True, an exception will be raised if the user expect the plugin to return one trait per match_list, but the plugin doesn't allow/support that. """ def __init__(self, **kw): """Setup the trait plugin. For each Trait definition a plugin is used on in a conversion definition, a new instance of the plugin will be created, and initialized with the parameters (if any) specified in the config file. :param kw: the parameters specified in the event definitions file. """ super(TraitPluginBase, self).__init__() @moves.moved_method('trait_values', version=6.0, removal_version="?") def trait_value(self, match_list): pass def trait_values(self, match_list): """Convert a set of fields to one or multiple Trait values. This method is called each time a trait is attempted to be extracted from a notification. It will be called *even if* no matching fields are found in the notification (in that case, the match_list will be empty). If this method returns None, the trait *will not* be added to the event. Any other value returned by this method will be used as the value for the trait. Values returned will be coerced to the appropriate type for the trait. :param match_list: A list (may be empty if no matches) of *tuples*. Each tuple is (field_path, value) where field_path is the jsonpath for that specific field. Example:: trait's fields definition: ['payload.foobar', 'payload.baz', 'payload.thing.*'] notification body: { 'metadata': {'message_id': '12345'}, 'publisher': 'someservice.host', 'payload': { 'foobar': 'test', 'thing': { 'bar': 12, 'boing': 13, } } } match_list will be: [('payload.foobar','test'), ('payload.thing.bar',12), ('payload.thing.boing',13)] Here is a plugin that emulates the default (no plugin) behavior: .. code-block:: python class DefaultPlugin(TraitPluginBase): "Plugin that returns the first field value." def __init__(self, **kw): super(DefaultPlugin, self).__init__() def trait_value(self, match_list): if not match_list: return None return [ match[1] for match in match_list] """ # For backwards compatibility for the renamed method. 
return [self.trait_value(match_list)] class SplitterTraitPlugin(TraitPluginBase): """Plugin that splits a piece off of a string value.""" support_return_all_values = True def __init__(self, separator=".", segment=0, max_split=None, **kw): """Setup how do split the field. :param separator: String to split on. default "." :param segment: Which segment to return. (int) default 0 :param max_split: Limit number of splits. Default: None (no limit) """ LOG.warning('split plugin is deprecated, ' 'add ".`split(%(sep)s, %(segment)d, ' '%(max_split)d)`" to your jsonpath instead' % dict(sep=separator, segment=segment, max_split=(-1 if max_split is None else max_split))) self.separator = separator self.segment = segment self.max_split = max_split super(SplitterTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): return [self._trait_value(match) for match in match_list] def _trait_value(self, match): value = six.text_type(match[1]) if self.max_split is not None: values = value.split(self.separator, self.max_split) else: values = value.split(self.separator) try: return values[self.segment] except IndexError: return None class BitfieldTraitPlugin(TraitPluginBase): """Plugin to set flags on a bitfield.""" def __init__(self, initial_bitfield=0, flags=None, **kw): """Setup bitfield trait. :param initial_bitfield: (int) initial value for the bitfield Flags that are set will be OR'ed with this. :param flags: List of dictionaries defining bitflags to set depending on data in the notification. Each one has the following keys: path: jsonpath of field to match. bit: (int) number of bit to set (lsb is bit 0) value: set bit if corresponding field's value matches this. If value is not provided, bit will be set if the field exists (and is non-null), regardless of its value. """ self.initial_bitfield = initial_bitfield if flags is None: flags = [] self.flags = flags super(BitfieldTraitPlugin, self).__init__(**kw) def trait_values(self, match_list): matches = dict(match_list) bitfield = self.initial_bitfield for flagdef in self.flags: path = flagdef['path'] bit = 2 ** int(flagdef['bit']) if path in matches: if 'value' in flagdef: if matches[path] == flagdef['value']: bitfield |= bit else: bitfield |= bit return [bitfield] class TimedeltaPluginMissedFields(Exception): def __init__(self): msg = ('It is required to use two timestamp field with Timedelta ' 'plugin.') super(TimedeltaPluginMissedFields, self).__init__(msg) class TimedeltaPlugin(TraitPluginBase): """Setup timedelta meter volume of two timestamps fields. Example:: trait's fields definition: ['payload.created_at', 'payload.launched_at'] value is been created as total seconds between 'launched_at' and 'created_at' timestamps. """ # TODO(idegtiarov): refactor code to have meter_plugins separate from # trait_plugins def trait_value(self, match_list): if len(match_list) != 2: LOG.warning('Timedelta plugin is required two timestamp fields' ' to create timedelta value.') return start, end = match_list try: start_time = timeutils.parse_isotime(start[1]) end_time = timeutils.parse_isotime(end[1]) except Exception as err: LOG.warning('Failed to parse date from set fields, both ' 'fields %(start)s and %(end)s must be datetime: ' '%(err)s' % dict(start=start[0], end=end[0], err=err) ) return return abs((end_time - start_time).total_seconds()) ceilometer-10.0.0/ceilometer/event/converter.py0000666000175100017510000002703713236733243021617 0ustar zuulzuul00000000000000# # Copyright 2013 Rackspace Hosting. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import fnmatch from oslo_utils import timeutils import pkg_resources import six from ceilometer import declarative from ceilometer.event import models from ceilometer.i18n import _ OPTS = [ cfg.StrOpt('definitions_cfg_file', default="event_definitions.yaml", help="Configuration file for event definitions." ), cfg.BoolOpt('drop_unmatched_notifications', default=False, help='Drop notifications if no event definition matches. ' '(Otherwise, we convert them with just the default traits)'), cfg.MultiStrOpt('store_raw', default=[], help='Store the raw notification for select priority ' 'levels (info and/or error). By default, raw details are ' 'not captured.') ] LOG = log.getLogger(__name__) class TraitDefinition(declarative.Definition): def __init__(self, name, trait_cfg, plugin_manager): super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager) type_name = (trait_cfg.get('type', 'text') if isinstance(trait_cfg, dict) else 'text') self.trait_type = models.Trait.get_type_by_name(type_name) if self.trait_type is None: raise declarative.EventDefinitionException( _("Invalid trait type '%(type)s' for trait %(trait)s") % dict(type=type_name, trait=name), self.cfg) def to_trait(self, notification_body): value = self.parse(notification_body) if value is None: return None # NOTE(mdragon): some openstack projects (mostly Nova) emit '' # for null fields for things like dates. 
if self.trait_type != models.Trait.TEXT_TYPE and value == '': return None value = models.Trait.convert_value(self.trait_type, value) return models.Trait(self.name, self.trait_type, value) class EventDefinition(object): DEFAULT_TRAITS = dict( service=dict(type='text', fields='publisher_id'), request_id=dict(type='text', fields='ctxt.request_id'), project_id=dict(type='text', fields=['payload.tenant_id', 'ctxt.tenant']), user_id=dict(type='text', fields=['payload.user_id', 'ctxt.user_id']), # TODO(dikonoor):tenant_id is old terminology and should # be deprecated tenant_id=dict(type='text', fields=['payload.tenant_id', 'ctxt.tenant']), ) def __init__(self, definition_cfg, trait_plugin_mgr, raw_levels): self._included_types = [] self._excluded_types = [] self.traits = dict() self.cfg = definition_cfg self.raw_levels = raw_levels try: event_type = definition_cfg['event_type'] traits = definition_cfg['traits'] except KeyError as err: raise declarative.EventDefinitionException( _("Required field %s not specified") % err.args[0], self.cfg) if isinstance(event_type, six.string_types): event_type = [event_type] for t in event_type: if t.startswith('!'): self._excluded_types.append(t[1:]) else: self._included_types.append(t) if self._excluded_types and not self._included_types: self._included_types.append('*') for trait_name in self.DEFAULT_TRAITS: self.traits[trait_name] = TraitDefinition( trait_name, self.DEFAULT_TRAITS[trait_name], trait_plugin_mgr) for trait_name in traits: self.traits[trait_name] = TraitDefinition( trait_name, traits[trait_name], trait_plugin_mgr) def included_type(self, event_type): for t in self._included_types: if fnmatch.fnmatch(event_type, t): return True return False def excluded_type(self, event_type): for t in self._excluded_types: if fnmatch.fnmatch(event_type, t): return True return False def match_type(self, event_type): return (self.included_type(event_type) and not self.excluded_type(event_type)) @property def is_catchall(self): return '*' in self._included_types and not self._excluded_types def to_event(self, priority, notification_body): event_type = notification_body['event_type'] message_id = notification_body['metadata']['message_id'] when = timeutils.normalize_time(timeutils.parse_isotime( notification_body['metadata']['timestamp'])) traits = (self.traits[t].to_trait(notification_body) for t in self.traits) # Only accept non-None value traits ... traits = [trait for trait in traits if trait is not None] raw = notification_body if priority in self.raw_levels else {} event = models.Event(message_id, event_type, when, traits, raw) return event class NotificationEventsConverter(object): """Notification Event Converter The NotificationEventsConverter handles the conversion of Notifications from openstack systems into Ceilometer Events. The conversion is handled according to event definitions in a config file. The config is a list of event definitions. Order is significant, a notification will be processed according to the LAST definition that matches it's event_type. (We use the last matching definition because that allows you to use YAML merge syntax in the definitions file.) Each definition is a dictionary with the following keys (all are required): - event_type: this is a list of notification event_types this definition will handle. These can be wildcarded with unix shell glob (not regex!) wildcards. An exclusion listing (starting with a '!') will exclude any types listed from matching. 
If ONLY exclusions are listed, the definition will match anything not matching the exclusions. This item can also be a string, which will be taken as equivalent to 1 item list. Examples: * ['compute.instance.exists'] will only match compute.instance.exists notifications * "compute.instance.exists" Same as above. * ["image.create", "image.delete"] will match image.create and image.delete, but not anything else. * "compute.instance.*" will match compute.instance.create.start but not image.upload * ['*.start','*.end', '!scheduler.*'] will match compute.instance.create.start, and image.delete.end, but NOT compute.instance.exists or scheduler.run_instance.start * '!image.*' matches any notification except image notifications. * ['*', '!image.*'] same as above. - traits: (dict) The keys are trait names, the values are the trait definitions. Each trait definition is a dictionary with the following keys: - type (optional): The data type for this trait. (as a string) Valid options are: 'text', 'int', 'float' and 'datetime', defaults to 'text' if not specified. - fields: a path specification for the field(s) in the notification you wish to extract. The paths can be specified with a dot syntax (e.g. 'payload.host') or dictionary syntax (e.g. 'payload[host]') is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so:: "payload.image_meta.'org.openstack__1__architecture'" The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for more info) Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null (i.e. is not None) values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish) This configuration value is normally a string, for convenience, it can be specified as a list of specifications, which will be OR'ed together (a union query in jsonpath terms) - plugin (optional): (dictionary) with the following keys: - name: (string) name of a plugin to load - parameters: (optional) Dictionary of keyword args to pass to the plugin on initialization. See documentation on each plugin to see what arguments it accepts. For convenience, this value can also be specified as a string, which is interpreted as a plugin name, which will be loaded with no parameters. 
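    A minimal illustrative definition (the event_type and trait names here
    are hypothetical, not taken from the shipped event_definitions.yaml)
    could look like::

        - event_type: ['mysvc.widget.create', 'mysvc.widget.delete']
          traits:
            widget_id:
              fields: payload.widget_id
            size:
              type: int
              fields: payload.size
            created_at:
              type: datetime
              fields: payload.created_at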
""" def __init__(self, conf, events_config, trait_plugin_mgr): self.conf = conf raw_levels = [level.lower() for level in self.conf.event.store_raw] self.definitions = [ EventDefinition(event_def, trait_plugin_mgr, raw_levels) for event_def in reversed(events_config)] add_catchall = not self.conf.event.drop_unmatched_notifications if add_catchall and not any(d.is_catchall for d in self.definitions): event_def = dict(event_type='*', traits={}) self.definitions.append(EventDefinition(event_def, trait_plugin_mgr, raw_levels)) def to_event(self, priority, notification_body): event_type = notification_body['event_type'] message_id = notification_body['metadata']['message_id'] edef = None for d in self.definitions: if d.match_type(event_type): edef = d break if edef is None: msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') % dict(type=event_type, msgid=message_id)) if self.conf.event.drop_unmatched_notifications: LOG.debug(msg) else: # If drop_unmatched_notifications is False, this should # never happen. (mdragon) LOG.error(msg) return None return edef.to_event(priority, notification_body) def setup_events(conf, trait_plugin_mgr): """Setup the event definitions from yaml config file.""" return NotificationEventsConverter( conf, declarative.load_definitions( conf, [], conf.event.definitions_cfg_file, pkg_resources.resource_filename( 'ceilometer', "pipeline/data/event_definitions.yaml")), trait_plugin_mgr) ceilometer-10.0.0/ceilometer/event/__init__.py0000666000175100017510000000000013236733243021324 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/event/models.py0000666000175100017510000001133413236733243021064 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the events storage API. """ from oslo_utils import timeutils import six def serialize_dt(value): """Serializes parameter if it is datetime.""" return value.isoformat() if hasattr(value, 'isoformat') else value class Model(object): """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in six.iteritems(kwds): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() def __ne__(self, other): return not self.__eq__(other) class Event(Model): """A raw event from the source system. Events have Traits. Metrics will be derived from one or more Events. """ DUPLICATE = 1 UNKNOWN_PROBLEM = 2 INCOMPATIBLE_TRAIT = 3 def __init__(self, message_id, event_type, generated, traits, raw): """Create a new event. :param message_id: Unique ID for the message this event stemmed from. This is different than the Event ID, which comes from the underlying storage system. :param event_type: The type of the event. :param generated: UTC time for when the event occurred. :param traits: list of Traits on this Event. 
:param raw: Unindexed raw notification details. """ Model.__init__(self, message_id=message_id, event_type=event_type, generated=generated, traits=traits, raw=raw) def append_trait(self, trait_model): self.traits.append(trait_model) def __repr__(self): trait_list = [] if self.traits: trait_list = [six.text_type(trait) for trait in self.traits] return ("" % (self.message_id, self.event_type, self.generated, " ".join(trait_list))) def serialize(self): return {'message_id': self.message_id, 'event_type': self.event_type, 'generated': serialize_dt(self.generated), 'traits': [trait.serialize() for trait in self.traits], 'raw': self.raw} class Trait(Model): """A Trait is a key/value pair of data on an Event. The value is variant record of basic data types (int, date, float, etc). """ NONE_TYPE = 0 TEXT_TYPE = 1 INT_TYPE = 2 FLOAT_TYPE = 3 DATETIME_TYPE = 4 type_names = { NONE_TYPE: "none", TEXT_TYPE: "string", INT_TYPE: "integer", FLOAT_TYPE: "float", DATETIME_TYPE: "datetime" } def __init__(self, name, dtype, value): if not dtype: dtype = Trait.NONE_TYPE Model.__init__(self, name=name, dtype=dtype, value=value) def __repr__(self): return "" % (self.name, self.dtype, self.value) def serialize(self): return self.name, self.dtype, serialize_dt(self.value) def get_type_name(self): return self.get_name_by_type(self.dtype) @classmethod def get_type_by_name(cls, type_name): return getattr(cls, '%s_TYPE' % type_name.upper(), None) @classmethod def get_type_names(cls): return cls.type_names.values() @classmethod def get_name_by_type(cls, type_id): return cls.type_names.get(type_id, "none") @classmethod def convert_value(cls, trait_type, value): if trait_type is cls.INT_TYPE: return int(value) if trait_type is cls.FLOAT_TYPE: return float(value) if trait_type is cls.DATETIME_TYPE: return timeutils.normalize_time(timeutils.parse_isotime(value)) # Cropping the text value to match the TraitText value size if isinstance(value, six.binary_type): return value.decode('utf-8')[:255] return six.text_type(value)[:255] ceilometer-10.0.0/ceilometer/sample.py0000666000175100017510000001342613236733243017745 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample class for holding data about a metering event. A Sample doesn't really do anything, but we need a way to ensure that all of the appropriate fields have been filled in by the plugins that create them. """ import copy import uuid from oslo_config import cfg from oslo_utils import timeutils import six OPTS = [ cfg.StrOpt('sample_source', default='openstack', help='Source for samples emitted on this instance.'), cfg.ListOpt('reserved_metadata_namespace', default=['metering.'], help='List of metadata prefixes reserved for metering use.'), cfg.IntOpt('reserved_metadata_length', default=256, help='Limit on length of reserved metadata values.'), cfg.ListOpt('reserved_metadata_keys', default=[], help='List of metadata keys reserved for metering use. 
And ' 'these keys are additional to the ones included in the ' 'namespace.'), ] def add_reserved_user_metadata(conf, src_metadata, dest_metadata): limit = conf.reserved_metadata_length user_metadata = {} for prefix in conf.reserved_metadata_namespace: md = dict( (k[len(prefix):].replace('.', '_'), v[:limit] if isinstance(v, six.string_types) else v) for k, v in src_metadata.items() if (k.startswith(prefix) and k[len(prefix):].replace('.', '_') not in dest_metadata) ) user_metadata.update(md) for metadata_key in conf.reserved_metadata_keys: md = dict( (k.replace('.', '_'), v[:limit] if isinstance(v, six.string_types) else v) for k, v in src_metadata.items() if (k == metadata_key and k.replace('.', '_') not in dest_metadata) ) user_metadata.update(md) if user_metadata: dest_metadata['user_metadata'] = user_metadata return dest_metadata # Fields explanation: # # Source: the source of this sample # Name: the name of the meter, must be unique # Type: the type of the meter, must be either: # - cumulative: the value is incremented and never reset to 0 # - delta: the value is reset to 0 each time it is sent # - gauge: the value is an absolute value and is not a counter # Unit: the unit of the meter # Volume: the sample value # User ID: the user ID # Project ID: the project ID # Resource ID: the resource ID # Timestamp: when the sample has been read # Resource metadata: various metadata # id: an uuid of a sample, can be taken from API when post sample via API class Sample(object): SOURCE_DEFAULT = "openstack" def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp=None, resource_metadata=None, source=None, id=None, monotonic_time=None): self.name = name self.type = type self.unit = unit self.volume = volume self.user_id = user_id self.project_id = project_id self.resource_id = resource_id self.timestamp = timestamp self.resource_metadata = resource_metadata or {} self.source = source or self.SOURCE_DEFAULT self.id = id or str(uuid.uuid1()) self.monotonic_time = monotonic_time def as_dict(self): return copy.copy(self.__dict__) def __repr__(self): return '' % ( self.name, self.volume, self.resource_id, self.timestamp) @classmethod def from_notification(cls, name, type, volume, unit, user_id, project_id, resource_id, message, timestamp=None, metadata=None, source=None): if not metadata: metadata = (copy.copy(message['payload']) if isinstance(message['payload'], dict) else {}) metadata['event_type'] = message['event_type'] metadata['host'] = message['publisher_id'] ts = timestamp if timestamp else message['metadata']['timestamp'] ts = timeutils.parse_isotime(ts).isoformat() # add UTC if necessary return cls(name=name, type=type, volume=volume, unit=unit, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=ts, resource_metadata=metadata, source=source) def set_timestamp(self, timestamp): self.timestamp = timestamp def get_iso_timestamp(self): return timeutils.parse_isotime(self.timestamp) def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other): return not self.__eq__(other) def setup(conf): # NOTE(sileht): Instead of passing the cfg.CONF everywhere in ceilometer # prepare_service will override this default Sample.SOURCE_DEFAULT = conf.sample_source TYPE_GAUGE = 'gauge' TYPE_DELTA = 'delta' TYPE_CUMULATIVE = 'cumulative' TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) ceilometer-10.0.0/ceilometer/pipeline/0000775000175100017510000000000013236733440017706 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/pipeline/data/0000775000175100017510000000000013236733440020617 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/pipeline/data/pipeline.yaml0000666000175100017510000000471313236733243023320 0ustar zuulzuul00000000000000--- sources: - name: meter_source meters: - "*" sinks: - meter_sink - name: cpu_source meters: - "cpu" sinks: - cpu_sink - cpu_delta_sink - name: disk_source meters: - "disk.read.bytes" - "disk.read.requests" - "disk.write.bytes" - "disk.write.requests" - "disk.device.read.bytes" - "disk.device.read.requests" - "disk.device.write.bytes" - "disk.device.write.requests" sinks: - disk_sink - name: network_source meters: - "network.incoming.bytes" - "network.incoming.packets" - "network.outgoing.bytes" - "network.outgoing.packets" sinks: - network_sink sinks: - name: meter_sink transformers: publishers: - gnocchi:// - name: cpu_sink transformers: - name: "rate_of_change" parameters: target: name: "cpu_util" unit: "%" type: "gauge" max: 100 scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" publishers: - gnocchi:// - name: cpu_delta_sink transformers: - name: "delta" parameters: target: name: "cpu.delta" growth_only: True publishers: - gnocchi:// - name: disk_sink transformers: - name: "rate_of_change" parameters: source: map_from: name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)" unit: "(B|request)" target: map_to: name: "\\1.\\2.\\3.rate" unit: "\\1/s" type: "gauge" publishers: - gnocchi:// - name: network_sink transformers: - name: "rate_of_change" parameters: source: map_from: name: "network\\.(incoming|outgoing)\\.(bytes|packets)" unit: "(B|packet)" target: map_to: name: "network.\\1.\\2.rate" unit: "\\1/s" type: "gauge" publishers: - gnocchi:// ceilometer-10.0.0/ceilometer/pipeline/data/event_definitions.yaml0000666000175100017510000004275213236733243025234 0ustar zuulzuul00000000000000--- - event_type: 'compute.instance.*' traits: &instance_traits tenant_id: fields: payload.tenant_id user_id: fields: payload.user_id instance_id: fields: payload.instance_id display_name: fields: payload.display_name resource_id: fields: payload.instance_id host: fields: publisher_id.`split(., 1, 1)` service: fields: publisher_id.`split(., 0, -1)` memory_mb: type: int fields: payload.memory_mb disk_gb: type: int fields: payload.disk_gb root_gb: type: int fields: payload.root_gb ephemeral_gb: type: int fields: payload.ephemeral_gb vcpus: type: int fields: payload.vcpus instance_type_id: type: int fields: payload.instance_type_id instance_type: fields: payload.instance_type state: fields: payload.state os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' os_version: fields: payload.image_meta.'org.openstack__1__os_version' os_distro: fields: payload.image_meta.'org.openstack__1__os_distro' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: compute.instance.update traits: <<: *instance_traits old_state: fields: payload.old_state - event_type: compute.instance.exists traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: &cinder_traits user_id: fields: 
payload.user_id project_id: fields: payload.tenant_id availability_zone: fields: payload.availability_zone display_name: fields: payload.display_name replication_status: fields: payload.replication_status status: fields: payload.status created_at: fields: payload.created_at - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.volume_id host: fields: payload.host size: fields: payload.size type: fields: payload.volume_type replication_status: fields: payload.replication_status - event_type: ['share.create.*', 'share.delete.*', 'share.extend.*', 'share.shrink.*'] traits: &share_traits share_id: fields: payload.share_id user_id: fields: payload.user_id project_id: fields: payload.tenant_id snapshot_id: fields: payload.snapshot_id availability_zone: fields: payload.availability_zone status: fields: payload.status created_at: fields: payload.created_at share_group_id: fields: payload.share_group_id size: fields: payload.size name: fields: payload.name proto: fields: payload.proto is_public: fields: payload.is_public description: fields: payload.description host: fields: payload.host - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id volume_id: fields: payload.volume_id - event_type: ['image_volume_cache.*'] traits: image_id: fields: payload.image_id host: fields: payload.host - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete'] traits: &glance_crud project_id: fields: payload.owner resource_id: fields: payload.id name: fields: payload.name status: fields: payload.status created_at: fields: payload.created_at user_id: fields: payload.owner deleted_at: fields: payload.deleted_at size: fields: payload.size - event_type: image.send traits: &glance_send receiver_project: fields: payload.receiver_tenant_id receiver_user: fields: payload.receiver_user_id user_id: fields: payload.owner_id image_id: fields: payload.image_id destination_ip: fields: payload.destination_ip bytes_sent: type: int fields: payload.bytes_sent - event_type: orchestration.stack.* traits: &orchestration_crud project_id: fields: payload.tenant_id user_id: fields: ['ctxt.trustor_user_id', 'ctxt.user_id'] resource_id: fields: payload.stack_identity name: fields: payload.name - event_type: sahara.cluster.* traits: &sahara_crud project_id: fields: payload.project_id user_id: fields: ctxt.user_id resource_id: fields: payload.cluster_id name: fields: payload.name - event_type: sahara.cluster.health traits: &sahara_health <<: *sahara_crud verification_id: fields: payload.verification_id health_check_status: fields: payload.health_check_status health_check_name: fields: payload.health_check_name health_check_description: fields: payload.health_check_description created_at: type: datetime fields: payload.created_at updated_at: type: datetime fields: payload.updated_at - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] traits: &identity_crud resource_id: fields: payload.resource_info initiator_id: fields: payload.initiator.id project_id: fields: payload.initiator.project_id domain_id: fields: payload.initiator.domain_id - event_type: identity.role_assignment.* traits: &identity_role_assignment role: 
fields: payload.role group: fields: payload.group domain: fields: payload.domain user: fields: payload.user project: fields: payload.project - event_type: identity.authenticate traits: &identity_authenticate typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_addr: fields: payload.initiator.host.address target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id observer_typeURI: fields: payload.observer.typeURI observer_id: fields: payload.observer.id - event_type: objectstore.http.request traits: &objectstore_request typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_project_id: fields: payload.initiator.project_id target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id target_action: fields: payload.target.action target_metadata_path: fields: payload.target.metadata.path target_metadata_version: fields: payload.target.metadata.version target_metadata_container: fields: payload.target.metadata.container target_metadata_object: fields: payload.target.metadata.object observer_id: fields: payload.observer.id - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] traits: &network_traits user_id: fields: ctxt.user_id project_id: fields: ctxt.tenant_id - event_type: network.* traits: <<: *network_traits name: fields: payload.network.name resource_id: fields: ['payload.network.id', 'payload.id'] - event_type: subnet.* traits: <<: *network_traits name: fields: payload.subnet.name resource_id: fields: ['payload.subnet.id', 'payload.id'] - event_type: port.* traits: <<: *network_traits name: fields: payload.port.name resource_id: fields: ['payload.port.id', 'payload.id'] - event_type: router.* traits: <<: *network_traits name: fields: payload.router.name resource_id: fields: ['payload.router.id', 'payload.id'] - event_type: floatingip.* traits: <<: *network_traits resource_id: fields: ['payload.floatingip.id', 'payload.id'] - event_type: pool.* traits: <<: *network_traits name: fields: payload.pool.name resource_id: fields: ['payload.pool.id', 'payload.id'] - event_type: vip.* traits: <<: *network_traits resource_id: fields: ['payload.vip.id', 'payload.id'] - event_type: member.* traits: <<: *network_traits resource_id: fields: ['payload.member.id', 'payload.id'] - event_type: health_monitor.* traits: <<: *network_traits name: fields: payload.health_monitor.name resource_id: fields: ['payload.health_monitor.id', 'payload.id'] - event_type: healthmonitor.* traits: <<: *network_traits name: fields: payload.healthmonitor.name resource_id: fields: ['payload.healthmonitor.id', 'payload.id'] - event_type: listener.* traits: <<: *network_traits name: fields: payload.listener.name resource_id: fields: 
['payload.listener.id', 'payload.id'] - event_type: loadbalancer.* traits: <<: *network_traits name: fields: payload.loadbalancer.name resource_id: fields: ['payload.loadbalancer.id', 'payload.id'] - event_type: firewall.* traits: <<: *network_traits name: fields: payload.firewall.name resource_id: fields: ['payload.firewall.id', 'payload.id'] - event_type: firewall_policy.* traits: <<: *network_traits name: fields: payload.firewall_policy.name resource_id: fields: ['payload.firewall_policy.id', 'payload.id'] - event_type: firewall_rule.* traits: <<: *network_traits name: fields: payload.firewall_rule.name resource_id: fields: ['payload.firewall_rule.id', 'payload.id'] - event_type: vpnservice.* traits: <<: *network_traits name: fields: payload.vpnservice.name resource_id: fields: ['payload.vpnservice.id', 'payload.id'] - event_type: ipsecpolicy.* traits: <<: *network_traits name: fields: payload.ipsecpolicy.name resource_id: fields: ['payload.ipsecpolicy.id', 'payload.id'] - event_type: ikepolicy.* traits: <<: *network_traits name: fields: payload.ikepolicy.name resource_id: fields: ['payload.ikepolicy.id', 'payload.id'] - event_type: ipsec_site_connection.* traits: <<: *network_traits resource_id: fields: ['payload.ipsec_site_connection.id', 'payload.id'] - event_type: '*http.*' traits: &http_audit project_id: fields: payload.initiator.project_id user_id: fields: payload.initiator.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType action: fields: payload.action outcome: fields: payload.outcome id: fields: payload.id eventTime: fields: payload.eventTime requestPath: fields: payload.requestPath observer_id: fields: payload.observer.id target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI target_name: fields: payload.target.name initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_address: fields: payload.initiator.host.address - event_type: '*http.response' traits: <<: *http_audit reason_code: fields: payload.reason.reasonCode - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] traits: &dns_domain_traits status: fields: payload.status retry: fields: payload.retry description: fields: payload.description expire: fields: payload.expire email: fields: payload.email ttl: fields: payload.ttl action: fields: payload.action name: fields: payload.name resource_id: fields: payload.id created_at: fields: payload.created_at updated_at: fields: payload.updated_at version: fields: payload.version parent_domain_id: fields: parent_domain_id serial: fields: payload.serial - event_type: dns.domain.exists traits: <<: *dns_domain_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: trove.* traits: &trove_base_traits instance_type: fields: payload.instance_type user_id: fields: payload.user_id resource_id: fields: payload.instance_id instance_type_id: fields: payload.instance_type_id launched_at: type: datetime fields: payload.launched_at instance_name: fields: payload.instance_name state: fields: payload.state nova_instance_id: fields: payload.nova_instance_id service_id: fields: payload.service_id created_at: type: datetime fields: payload.created_at region: fields: payload.region - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] 
traits: &trove_common_traits name: fields: payload.name availability_zone: fields: payload.availability_zone instance_size: type: int fields: payload.instance_size volume_size: type: int fields: payload.volume_size nova_volume_id: fields: payload.nova_volume_id - event_type: trove.instance.create traits: <<: [*trove_base_traits, *trove_common_traits] - event_type: trove.instance.modify_volume traits: <<: [*trove_base_traits, *trove_common_traits] old_volume_size: type: int fields: payload.old_volume_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.modify_flavor traits: <<: [*trove_base_traits, *trove_common_traits] old_instance_size: type: int fields: payload.old_instance_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.delete traits: <<: [*trove_base_traits, *trove_common_traits] deleted_at: type: datetime fields: payload.deleted_at - event_type: trove.instance.exists traits: <<: *trove_base_traits display_name: fields: payload.display_name audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params - event_type: 'magnum.cluster.*' traits: &magnum_cluster_crud id: fields: payload.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType eventTime: fields: payload.eventTime action: fields: payload.action outcome: fields: payload.outcome initiator_id: fields: payload.initiator.id initiator_typeURI: fields: payload.initiator.typeURI initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_address: fields: payload.initiator.host.address target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI observer_id: fields: payload.observer.id observer_typeURI: fields: payload.observer.typeURI - event_type: 'alarm.*' traits: id: fields: payload.alarm_id user_id: fields: payload.user_id project_id: fields: payload.project_id on_behalf_of: fields: payload.on_behalf_of severity: fields: payload.severity detail: fields: payload.detail type: fields: payload.type ceilometer-10.0.0/ceilometer/pipeline/data/event_pipeline.yaml0000666000175100017510000000030313236733243024510 0ustar zuulzuul00000000000000--- sources: - name: event_source events: - "*" sinks: - event_sink sinks: - name: event_sink transformers: publishers: - gnocchi:// ceilometer-10.0.0/ceilometer/pipeline/sample.py0000666000175100017510000002323413236733243021550 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from itertools import chain from oslo_log import log from stevedore import extension from ceilometer import agent from ceilometer.pipeline import base from ceilometer.publisher import utils as publisher_utils from ceilometer import sample as sample_util LOG = log.getLogger(__name__) class SampleEndpoint(base.MainNotificationEndpoint): def info(self, notifications): """Convert message at info level to Ceilometer sample. :param notifications: list of notifications """ return self.process_notifications('info', notifications) def sample(self, notifications): """Convert message at sample level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('sample', notifications) def process_notifications(self, priority, notifications): for message in notifications: try: with self.publisher as p: p(list(self.build_sample(message))) except Exception: LOG.error('Fail to process notification', exc_info=True) def build_sample(notification): """Build sample from provided notification.""" pass class InterimSampleEndpoint(base.NotificationEndpoint): def __init__(self, conf, publisher, pipe_name): self.event_types = [pipe_name] super(InterimSampleEndpoint, self).__init__(conf, publisher) def sample(self, notifications): return self.process_notifications('sample', notifications) def process_notifications(self, priority, notifications): samples = chain.from_iterable(m["payload"] for m in notifications) samples = [ sample_util.Sample(name=s['counter_name'], type=s['counter_type'], unit=s['counter_unit'], volume=s['counter_volume'], user_id=s['user_id'], project_id=s['project_id'], resource_id=s['resource_id'], timestamp=s['timestamp'], resource_metadata=s['resource_metadata'], source=s.get('source'), # NOTE(sileht): May come from an older node, # Put None in this case. monotonic_time=s.get('monotonic_time')) for s in samples if publisher_utils.verify_signature( s, self.conf.publisher.telemetry_secret) ] with self.publisher as p: p(samples) class SampleSource(base.PipelineSource): """Represents a source of samples. In effect it is a set of notification handlers processing samples for a set of matching meters. Each source encapsulates meter name matching and mapping to one or more sinks for publication. """ def __init__(self, cfg): super(SampleSource, self).__init__(cfg) try: self.meters = cfg['meters'] except KeyError: raise base.PipelineException("Missing meters value", cfg) try: self.check_source_filtering(self.meters, 'meters') except agent.SourceException as err: raise base.PipelineException(err.msg, cfg) def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) class SampleSink(base.Sink): def _transform_sample(self, start, sample): try: for transformer in self.transformers[start:]: sample = transformer.handle_sample(sample) if not sample: LOG.debug( "Pipeline %(pipeline)s: Sample dropped by " "transformer %(trans)s", {'pipeline': self, 'trans': transformer}) return return sample except Exception: LOG.error("Pipeline %(pipeline)s: Exit after error " "from transformer %(trans)s " "for %(smp)s" % {'pipeline': self, 'trans': transformer, 'smp': sample}, exc_info=True) def _publish_samples(self, start, samples): """Push samples into pipeline for publishing. :param start: The first transformer that the sample will be injected. This is mainly for flush() invocation that transformer may emit samples. :param samples: Sample list. 
""" transformed_samples = [] if not self.transformers: transformed_samples = samples else: for sample in samples: LOG.debug( "Pipeline %(pipeline)s: Transform sample " "%(smp)s from %(trans)s transformer", {'pipeline': self, 'smp': sample, 'trans': start}) sample = self._transform_sample(start, sample) if sample: transformed_samples.append(sample) if transformed_samples: for p in self.publishers: try: p.publish_samples(transformed_samples) except Exception: LOG.error("Pipeline %(pipeline)s: Continue after " "error from publisher %(pub)s" % {'pipeline': self, 'pub': p}, exc_info=True) def publish_samples(self, samples): self._publish_samples(0, samples) def flush(self): """Flush data after all samples have been injected to pipeline.""" for (i, transformer) in enumerate(self.transformers): try: self._publish_samples(i + 1, list(transformer.flush())) except Exception: LOG.error("Pipeline %(pipeline)s: Error " "flushing transformer %(trans)s" % {'pipeline': self, 'trans': transformer}, exc_info=True) class SamplePipeline(base.Pipeline): """Represents a pipeline for Samples.""" default_grouping_key = ['resource_id'] def _validate_volume(self, s): volume = s.volume if volume is None: LOG.warning( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: None), the sample will' ' be dropped' % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} ) return False if not isinstance(volume, (int, float)): try: volume = float(volume) except ValueError: LOG.warning( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': ( s.timestamp if s.timestamp else 'NO TIMESTAMP'), 'counter_volume': volume} ) return False return True def publish_data(self, samples): if not isinstance(samples, list): samples = [samples] supported = [s for s in samples if self.supported(s) and self._validate_volume(s)] self.sink.publish_samples(supported) def serializer(self, sample): return publisher_utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret) def supported(self, sample): return self.source.support_meter(sample.name) class SamplePipelineManager(base.PipelineManager): pm_type = 'sample' pm_pipeline = SamplePipeline pm_source = SampleSource pm_sink = SampleSink def __init__(self, conf, partition=False): super(SamplePipelineManager, self).__init__( conf, conf.pipeline_cfg_file, self.get_transform_manager(), partition) @staticmethod def get_transform_manager(): return extension.ExtensionManager('ceilometer.transformer') def get_main_endpoints(self): exts = extension.ExtensionManager( namespace='ceilometer.sample.endpoint', invoke_on_load=True, invoke_args=(self.conf, self.get_main_publisher())) return [ext.obj for ext in exts] def get_interim_endpoints(self): # FIXME(gordc): change this so we shard data rather than per # pipeline. this will allow us to use self.publisher and less # queues. return [InterimSampleEndpoint( self.conf, base.PublishContext([pipe]), pipe.name) for pipe in self.pipelines] ceilometer-10.0.0/ceilometer/pipeline/__init__.py0000666000175100017510000000000013236733243022010 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/pipeline/base.py0000666000175100017510000003744613236733243021213 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp. 
# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import oslo_messaging import six from ceilometer import agent from ceilometer import messaging from ceilometer import publisher OPTS = [ cfg.StrOpt('pipeline_cfg_file', default="pipeline.yaml", help="Configuration file for pipeline definition." ), cfg.StrOpt('event_pipeline_cfg_file', default="event_pipeline.yaml", help="Configuration file for event pipeline definition." ), ] LOG = log.getLogger(__name__) class PipelineException(agent.ConfigException): def __init__(self, message, cfg): super(PipelineException, self).__init__('Pipeline', message, cfg) class InterimPublishContext(object): """Publisher to hash/shard data to pipelines""" def __init__(self, conf, mgr): self.conf = conf self.mgr = mgr self.notifiers = self._get_notifiers(messaging.get_transport(conf)) def _get_notifiers(self, transport): notifiers = [] for x in range(self.conf.notification.pipeline_processing_queues): notifiers.append(oslo_messaging.Notifier( transport, driver=self.conf.publisher_notifier.telemetry_driver, topics=['-'.join( [self.mgr.NOTIFICATION_IPC, self.mgr.pm_type, str(x)])])) return notifiers @staticmethod def hash_grouping(datapoint, grouping_keys): # FIXME(gordc): this logic only supports a single grouping_key. we # need to change to support pipeline with multiple transformers and # different grouping_keys value = '' for key in grouping_keys or []: value += datapoint.get(key) if datapoint.get(key) else '' return hash(value) def __enter__(self): def p(data): data = [data] if not isinstance(data, list) else data for datapoint in data: for pipe in self.mgr.pipelines: if pipe.supported(datapoint): serialized_data = pipe.serializer(datapoint) key = (self.hash_grouping(serialized_data, pipe.get_grouping_key()) % len(self.notifiers)) self.notifiers[key].sample({}, event_type=pipe.name, payload=[serialized_data]) return p def __exit__(self, exc_type, exc_value, traceback): pass class PublishContext(object): def __init__(self, pipelines): self.pipelines = pipelines or [] def __enter__(self): def p(data): for p in self.pipelines: p.publish_data(data) return p def __exit__(self, exc_type, exc_value, traceback): for p in self.pipelines: p.flush() class PipelineSource(agent.Source): """Represents a source of samples or events.""" def __init__(self, cfg): try: super(PipelineSource, self).__init__(cfg) except agent.SourceException as err: raise PipelineException(err.msg, cfg) try: self.sinks = cfg['sinks'] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) def check_sinks(self, sinks): if not self.sinks: raise PipelineException( "No sink defined in source %s" % self, self.cfg) for sink in self.sinks: if sink not in sinks: raise PipelineException( "Dangling sink %s from source %s" % (sink, self), self.cfg) class Sink(object): """Represents a sink for the transformation and publication of data. 
Each sink config is concerned *only* with the transformation rules and publication conduits for data. In effect, a sink describes a chain of handlers. The chain starts with zero or more transformers and ends with one or more publishers. The first transformer in the chain is passed data from the corresponding source, takes some action such as deriving rate of change, performing unit conversion, or aggregating, before passing the modified data to next step. The subsequent transformers, if any, handle the data similarly. At the end of the chain, publishers publish the data. The exact publishing method depends on publisher type, for example, pushing into data storage via the message bus providing guaranteed delivery, or for loss-tolerant data UDP may be used. If no transformers are included in the chain, the publishers are passed data directly from the sink which are published unchanged. """ def __init__(self, conf, cfg, transformer_manager, publisher_manager): self.conf = conf self.cfg = cfg try: self.name = cfg['name'] # It's legal to have no transformer specified self.transformer_cfg = cfg.get('transformers') or [] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) if not cfg.get('publishers'): raise PipelineException("No publisher specified", cfg) self.publishers = [] for p in cfg['publishers']: if '://' not in p: # Support old format without URL p = p + "://" try: self.publishers.append(publisher_manager.get(p)) except Exception: LOG.error("Unable to load publisher %s", p, exc_info=True) self.multi_publish = True if len(self.publishers) > 1 else False self.transformers = self._setup_transformers(cfg, transformer_manager) def __str__(self): return self.name def _setup_transformers(self, cfg, transformer_manager): transformers = [] for transformer in self.transformer_cfg: parameter = transformer['parameters'] or {} try: ext = transformer_manager[transformer['name']] except KeyError: raise PipelineException( "No transformer named %s loaded" % transformer['name'], cfg) transformers.append(ext.plugin(**parameter)) LOG.info( "Pipeline %(pipeline)s: Setup transformer instance %(name)s " "with parameter %(param)s" % ({'pipeline': self, 'name': transformer['name'], 'param': parameter})) return transformers @staticmethod def flush(): """Flush data after all events have been injected to pipeline.""" @six.add_metaclass(abc.ABCMeta) class Pipeline(object): """Represents a coupling between a sink and a corresponding source.""" def __init__(self, conf, source, sink): self.conf = conf self.source = source self.sink = sink self.name = str(self) def __str__(self): return (self.source.name if self.source.name == self.sink.name else '%s:%s' % (self.source.name, self.sink.name)) def flush(self): self.sink.flush() @property def publishers(self): return self.sink.publishers @abc.abstractmethod def publish_data(self, data): """Publish data from pipeline.""" @abc.abstractproperty def default_grouping_key(self): """Attribute to hash data on. Pass if no partitioning.""" @abc.abstractmethod def supported(self, data): """Attribute to filter on. Pass if no partitioning.""" @abc.abstractmethod def serializer(self, data): """Serialize data for interim transport. 
Pass if no partitioning.""" def get_grouping_key(self): keys = [] for transformer in self.sink.transformers: keys += transformer.grouping_keys return list(set(keys)) or self.default_grouping_key class PublisherManager(object): def __init__(self, conf, purpose): self._loaded_publishers = {} self._conf = conf self._purpose = purpose def get(self, url): if url not in self._loaded_publishers: p = publisher.get_publisher( self._conf, url, 'ceilometer.%s.publisher' % self._purpose) self._loaded_publishers[url] = p return self._loaded_publishers[url] class PipelineManager(agent.ConfigManagerBase): """Pipeline Manager Pipeline manager sets up pipelines according to config file """ NOTIFICATION_IPC = 'ceilometer_ipc' def __init__(self, conf, cfg_file, transformer_manager, partition): """Setup the pipelines according to config. The configuration is supported as follows: Decoupled: the source and sink configuration are separately specified before being linked together. This allows source- specific configuration, such as meter handling, to be kept focused only on the fine-grained source while avoiding the necessity for wide duplication of sink-related config. The configuration is provided in the form of separate lists of dictionaries defining sources and sinks, for example: {"sources": [{"name": source_1, "meters" : ["meter_1", "meter_2"], "sinks" : ["sink_1", "sink_2"] }, {"name": source_2, "meters" : ["meter_3"], "sinks" : ["sink_2"] }, ], "sinks": [{"name": sink_1, "transformers": [ {"name": "Transformer_1", "parameters": {"p1": "value"}}, {"name": "Transformer_2", "parameters": {"p1": "value"}}, ], "publishers": ["publisher_1", "publisher_2"] }, {"name": sink_2, "publishers": ["publisher_3"] }, ] } Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. Transformer's name is plugin name in setup.cfg. Publisher's name is plugin name in setup.cfg """ super(PipelineManager, self).__init__(conf) cfg = self.load_config(cfg_file) self.pipelines = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) LOG.info('detected decoupled pipeline config format') publisher_manager = PublisherManager(self.conf, self.pm_type) unique_names = set() sources = [] for s in cfg.get('sources'): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) sources.append(self.pm_source(s)) unique_names.clear() sinks = {} for s in cfg.get('sinks'): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated sink names: %s" % name, self) else: unique_names.add(name) sinks[s['name']] = self.pm_sink(self.conf, s, transformer_manager, publisher_manager) unique_names.clear() for source in sources: source.check_sinks(sinks) for target in source.sinks: pipe = self.pm_pipeline(self.conf, source, sinks[target]) if pipe.name in unique_names: raise PipelineException( "Duplicate pipeline name: %s. Ensure pipeline" " names are unique. 
(name is the source and sink" " names combined)" % pipe.name, cfg) else: unique_names.add(pipe.name) self.pipelines.append(pipe) unique_names.clear() self.partition = partition @abc.abstractproperty def pm_type(self): """Pipeline manager type.""" @abc.abstractproperty def pm_pipeline(self): """Pipeline class""" @abc.abstractproperty def pm_source(self): """Pipeline source class""" @abc.abstractproperty def pm_sink(self): """Pipeline sink class""" def publisher(self): """Build publisher for pipeline publishing.""" return PublishContext(self.pipelines) def interim_publisher(self): """Build publishing context for IPC.""" return InterimPublishContext(self.conf, self) def get_main_publisher(self): """Return the publishing context to use""" return (self.interim_publisher() if self.partition else self.publisher()) def get_main_endpoints(self): """Return endpoints for main queue.""" pass def get_interim_endpoints(self): """Return endpoints for interim pipeline queues.""" pass class NotificationEndpoint(object): """Base Endpoint for plugins that support the notification API.""" event_types = [] """List of strings to filter messages on.""" def __init__(self, conf, publisher): super(NotificationEndpoint, self).__init__() # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch # messages to an endpoint. if self.event_types: self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) self.conf = conf self.publisher = publisher @abc.abstractmethod def process_notifications(self, priority, notifications): """Return a sequence of Counter instances for the given message. :param message: Message to process. """ @classmethod def _consume_and_drop(cls, notifications): """RPC endpoint for useless notification level""" # NOTE(sileht): nothing special todo here, but because we listen # for the generic notification exchange we have to consume all its # queues class MainNotificationEndpoint(NotificationEndpoint): """Listens to queues on all priority levels and clears by default.""" audit = NotificationEndpoint._consume_and_drop critical = NotificationEndpoint._consume_and_drop debug = NotificationEndpoint._consume_and_drop error = NotificationEndpoint._consume_and_drop info = NotificationEndpoint._consume_and_drop sample = NotificationEndpoint._consume_and_drop warn = NotificationEndpoint._consume_and_drop ceilometer-10.0.0/ceilometer/pipeline/event.py0000666000175100017510000001474313236733243021415 0ustar zuulzuul00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
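# NOTE(editorial sketch, not part of the original source): a decoupled
# pipeline configuration as described in PipelineManager above, written as
# the structure it is loaded into (all names below are illustrative):
#
#   {"sources": [{"name": "meter_source",
#                 "meters": ["*", "!disk.*"],   # wildcard plus exclusions
#                 "sinks": ["meter_sink"]}],
#    "sinks": [{"name": "meter_sink",
#               "transformers": [],
#               "publishers": ["gnocchi://"]}]}
#
# When workload partitioning is enabled, InterimPublishContext.hash_grouping()
# picks the interim IPC queue as hash(grouping value) modulo
# [notification]/pipeline_processing_queues, so data sharing a grouping key
# (resource_id for samples, event_type for events) stays on one queue.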
from itertools import chain from oslo_log import log import oslo_messaging from oslo_utils import timeutils from stevedore import extension from ceilometer import agent from ceilometer.event import converter from ceilometer.event import models from ceilometer.pipeline import base from ceilometer.publisher import utils as publisher_utils LOG = log.getLogger(__name__) class EventEndpoint(base.MainNotificationEndpoint): event_types = [] def __init__(self, conf, publisher): super(EventEndpoint, self).__init__(conf, publisher) LOG.debug('Loading event definitions') self.event_converter = converter.setup_events( conf, extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) def info(self, notifications): """Convert message at info level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('info', notifications) def error(self, notifications): """Convert message at error level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('error', notifications) def process_notifications(self, priority, notifications): for message in notifications: try: event = self.event_converter.to_event(priority, message) if event is not None: with self.publisher as p: p(event) except Exception: if not self.conf.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE LOG.error('Fail to process a notification', exc_info=True) return oslo_messaging.NotificationResult.HANDLED class InterimEventEndpoint(base.NotificationEndpoint): def __init__(self, conf, publisher, pipe_name): self.event_types = [pipe_name] super(InterimEventEndpoint, self).__init__(conf, publisher) def sample(self, notifications): return self.process_notifications('sample', notifications) def process_notifications(self, priority, notifications): events = chain.from_iterable(m["payload"] for m in notifications) events = [ models.Event( message_id=ev['message_id'], event_type=ev['event_type'], generated=timeutils.normalize_time( timeutils.parse_isotime(ev['generated'])), traits=[models.Trait(name, dtype, models.Trait.convert_value(dtype, value)) for name, dtype, value in ev['traits']], raw=ev.get('raw', {})) for ev in events if publisher_utils.verify_signature( ev, self.conf.publisher.telemetry_secret) ] try: with self.publisher as p: p(events) except Exception: if not self.conf.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE raise return oslo_messaging.NotificationResult.HANDLED class EventSource(base.PipelineSource): """Represents a source of events. In effect it is a set of notification handlers capturing events for a set of matching notifications. 
""" def __init__(self, cfg): super(EventSource, self).__init__(cfg) self.events = cfg.get('events') try: self.check_source_filtering(self.events, 'events') except agent.SourceException as err: raise base.PipelineException(err.msg, cfg) def support_event(self, event_name): return self.is_supported(self.events, event_name) class EventSink(base.Sink): def publish_events(self, events): if events: for p in self.publishers: try: p.publish_events(events) except Exception: LOG.error("Pipeline %(pipeline)s: %(status)s " "after error from publisher %(pub)s" % {'pipeline': self, 'status': 'Continue' if self.multi_publish else 'Exit', 'pub': p}, exc_info=True) if not self.multi_publish: raise class EventPipeline(base.Pipeline): """Represents a pipeline for Events.""" default_grouping_key = ['event_type'] def __str__(self): # NOTE(gordc): prepend a namespace so we ensure event and sample # pipelines do not have the same name. return 'event:%s' % super(EventPipeline, self).__str__() def publish_data(self, events): if not isinstance(events, list): events = [events] supported = [e for e in events if self.supported(e)] self.sink.publish_events(supported) def serializer(self, event): return publisher_utils.message_from_event( event, self.conf.publisher.telemetry_secret) def supported(self, event): return self.source.support_event(event.event_type) class EventPipelineManager(base.PipelineManager): pm_type = 'event' pm_pipeline = EventPipeline pm_source = EventSource pm_sink = EventSink def __init__(self, conf, partition=False): super(EventPipelineManager, self).__init__( conf, conf.event_pipeline_cfg_file, {}, partition) def get_main_endpoints(self): return [EventEndpoint(self.conf, self.get_main_publisher())] def get_interim_endpoints(self): # FIXME(gordc): change this so we shard data rather than per # pipeline. this will allow us to use self.publisher and less # queues. return [InterimEventEndpoint( self.conf, base.PublishContext([pipe]), pipe.name) for pipe in self.pipelines] ceilometer-10.0.0/ceilometer/version.py0000666000175100017510000000121113236733243020136 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('ceilometer') ceilometer-10.0.0/ceilometer/opts.py0000666000175100017510000001454313236733243017452 0ustar zuulzuul00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import socket from keystoneauth1 import loading from oslo_config import cfg import ceilometer.compute.discovery import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.utils import ceilometer.compute.virt.vmware.inspector import ceilometer.compute.virt.xenapi.inspector import ceilometer.event.converter import ceilometer.hardware.discovery import ceilometer.hardware.pollsters.generic import ceilometer.image.discovery import ceilometer.ipmi.platform.intel_node_manager import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.middleware import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline.base import ceilometer.polling.manager import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.HostAddressOpt('host', default=socket.gethostname(), sample_default='', help='Name of this node, which must be valid in an ' 'AMQP key. Can be an opaque identifier. For ZeroMQ ' 'only, must be a valid host name, FQDN, or IP ' 'address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'), cfg.IntOpt('max_parallel_requests', default=64, min=1, help='Maximum number of parallel requests for ' 'services to handle at the same time.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile # This have been removed due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.polling.manager.OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.utils.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.base.OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, OPTS)), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', [ cfg.StrOpt( 'backend_url', help='The backend URL to use for distributed coordination. 
If ' 'left empty, per-deployment central agent and per-host ' 'compute agent won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of that service is running.'), cfg.FloatOpt( 'check_watchers', default=10.0, help='Number of seconds between checks to see if group ' 'membership has changed'), ]), ('dispatcher_gnocchi', ( cfg.StrOpt( 'filter_project', deprecated_for_removal=True, default='gnocchi', help='Gnocchi project used to filter out samples ' 'generated by Gnocchi service activity'), cfg.StrOpt( 'archive_policy', deprecated_for_removal=True, help='The archive policy to use when the dispatcher ' 'create a new metric.'), cfg.StrOpt( 'resources_definition_file', deprecated_for_removal=True, default='gnocchi_resources.yaml', help=('The Yaml file that defines mapping between samples ' 'and gnocchi resources/metrics')), cfg.FloatOpt( 'request_timeout', default=6.05, min=0.0, deprecated_for_removal=True, help='Number of seconds before request to gnocchi times out'))), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, ceilometer.hardware.pollsters.generic.OPTS)), ('ipmi', itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, ceilometer.ipmi.pollsters.OPTS)), ('meter', ceilometer.meter.notifications.OPTS), ('notification', itertools.chain(ceilometer.notification.OPTS, ceilometer.notification.EXCHANGES_OPTS)), ('polling', ceilometer.polling.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), ('service_types', itertools.chain(ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)), ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for the password plugin that handles keystone v2 and v3 API # with discovery. But other options are possible. return [('service_credentials', itertools.chain( loading.get_auth_common_conf_options(), loading.get_auth_plugin_conf_options('password'), ceilometer.keystone_client.CLI_OPTS ))] ceilometer-10.0.0/ceilometer/neutron_client.py0000666000175100017510000003747413236733243021525 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
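# NOTE(editorial sketch, not part of the original source): the option groups
# registered in ceilometer/opts.py map onto ceilometer.conf sections; a
# minimal fragment using options defined above (values are the defaults or
# purely illustrative):
#
#   [DEFAULT]
#   http_timeout = 600
#   max_parallel_requests = 64
#
#   [coordination]
#   backend_url = redis://controller:6379   # illustrative; leaving it empty
#   check_watchers = 10.0                   # disables workload partitioning
#
#   [service_types]
#   neutron = network
#   neutron_lbaas_version = v2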
import functools from neutronclient.common import exceptions from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client SERVICE_OPTS = [ cfg.StrOpt('neutron', default='network', help='Neutron service type.'), cfg.StrOpt('neutron_lbaas_version', default='v2', choices=('v1', 'v2'), help='Neutron load balancer version.') ] LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except exceptions.NeutronClientException as e: if e.status_code == 404: LOG.warning("The resource could not be found.") else: LOG.warning(e) return [] except Exception as e: LOG.exception(e) raise return with_logging class Client(object): """A client which gets information via python-neutronclient.""" def __init__(self, conf): creds = conf.service_credentials params = { 'session': keystone_client.get_session(conf), 'endpoint_type': creds.interface, 'region_name': creds.region_name, 'service_type': conf.service_types.neutron, } self.client = clientv20.Client(**params) self.lb_version = conf.service_types.neutron_lbaas_version @logged def port_get_all(self): resp = self.client.list_ports() return resp.get('ports') @logged def vip_get_all(self): resp = self.client.list_vips() return resp.get('vips') @logged def pool_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_pools() resources = resp.get('pools') elif self.lb_version == 'v2': resources = self.list_pools_v2() return resources @logged def member_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_members() resources = resp.get('members') elif self.lb_version == 'v2': resources = self.list_members_v2() return resources @logged def health_monitor_get_all(self): resources = [] if self.lb_version == 'v1': resp = self.client.list_health_monitors() resources = resp.get('health_monitors') elif self.lb_version == 'v2': resources = self.list_health_monitors_v2() return resources @logged def pool_stats(self, pool): return self.client.retrieve_pool_stats(pool) @logged def vpn_get_all(self): resp = self.client.list_vpnservices() return resp.get('vpnservices') @logged def ipsec_site_connections_get_all(self): resp = self.client.list_ipsec_site_connections() return resp.get('ipsec_site_connections') @logged def firewall_get_all(self): resp = self.client.list_firewalls() return resp.get('firewalls') @logged def fw_policy_get_all(self): resp = self.client.list_firewall_policies() return resp.get('firewall_policies') @logged def fip_get_all(self): fips = self.client.list_floatingips()['floatingips'] return fips @logged def list_pools_v2(self): """This method is used to get the pools list. This method uses Load Balancer v2_0 API to achieve the detailed list of the pools. 
:returns: The list of the pool resources """ pool_status = dict() resp = self.client.list_lbaas_pools() temp_pools = resp.get('pools') resources = [] pool_listener_dict = self._get_pool_and_listener_ids(temp_pools) for k, v in pool_listener_dict.items(): loadbalancer_id = self._get_loadbalancer_id_with_listener_id(v) status = self._get_pool_status(loadbalancer_id, v) for k, v in status.items(): pool_status[k] = v for pool in temp_pools: pool_id = pool.get('id') pool['status'] = pool_status[pool_id] pool['lb_method'] = pool.get('lb_algorithm') pool['status_description'] = pool['status'] # Based on the LBaaSv2 design, the properties 'vip_id' # and 'subnet_id' should belong to the loadbalancer resource and # not to the pool resource. However, because we don't want to # change the metadata of the pool resource this release, # we set them to empty values manually. pool['provider'] = '' pool['vip_id'] = '' pool['subnet_id'] = '' resources.append(pool) return resources @logged def list_members_v2(self): """Method is used to list the members info. This method is used to get the detailed list of the members with Load Balancer v2_0 API :returns: The list of the member resources """ resources = [] pools = self.client.list_lbaas_pools().get('pools') for pool in pools: pool_id = pool.get('id') listeners = pool.get('listeners') if not listeners: continue # NOTE(sileht): Can we have more than 1 listener listener_id = listeners[0].get('id') lb_id = self._get_loadbalancer_id_with_listener_id(listener_id) status = self._get_member_status(lb_id, [listener_id, pool_id]) resp = self.client.list_lbaas_members(pool_id) temp_members = resp.get('members') for member in temp_members: member['status'] = status[member.get('id')] member['pool_id'] = pool_id member['status_description'] = member['status'] resources.append(member) return resources @logged def list_health_monitors_v2(self): """Method is used to list the health monitors This method is used to get the detailed list of the health monitors with Load Balancer v2_0 :returns: The list of the health monitor resources """ resp = self.client.list_lbaas_healthmonitors() resources = resp.get('healthmonitors') return resources def _get_pool_and_listener_ids(self, pools): """Method is used to get the mapping between pool and listener This method is used to get the pool ids and listener ids from the pool list. :param pools: The list of the polls :returns: The relationship between pool and listener. It's a dictionary type. The key of this dict is the id of pool and the value of it is the id of the first listener which the pool belongs to """ pool_listener_dict = dict() for pool in pools: key = pool.get("id") value = pool.get('listeners')[0].get('id') pool_listener_dict[key] = value return pool_listener_dict def _retrieve_loadbalancer_status_tree(self, loadbalancer_id): """Method is used to get the status of a LB. This method is used to get the status tree of a specific Load Balancer. :param loadbalancer_id: The ID of the specific Load Balancer. :returns: The status of the specific Load Balancer. It consists of the load balancer and all of its children's provisioning and operating statuses """ lb_status_tree = self.client.retrieve_loadbalancer_status( loadbalancer_id) return lb_status_tree def _get_loadbalancer_id_with_listener_id(self, listener_id): """This method is used to get the loadbalancer id. 
:param listener_id: The ID of the listener :returns: The ID of the Loadbalancer """ listener = self.client.show_listener(listener_id) listener_lbs = listener.get('listener').get('loadbalancers') loadbalancer_id = listener_lbs[0].get('id') return loadbalancer_id def _get_member_status(self, loadbalancer_id, parent_id): """Method used to get the status of member resource. This method is used to get the status of member resource belonged to the specific Load Balancer. :param loadbalancer_id: The ID of the Load Balancer. :param parent_id: The parent ID list of the member resource. For the member resource, the parent_id should be [listener_id, pool_id]. :returns: The status dictionary of the member resource. The key is the ID of the member. The value is the operating status of the member resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # with the corresponding apis, will change to use the new # method to get the status of the members. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: listener_id = listener_status.get('id') if listener_id == parent_id[0]: pools_status = listener_status.get('pools') for pool_status in pools_status: if pool_status.get('id') == parent_id[1]: members_status = pool_status.get('members') for member_status in members_status: key = member_status.get('id') # If the item has no the property 'id', skip # it. if key is None: continue # The situation that the property # 'operating_status' is none is handled in # the method get_sample() in lbaas.py. value = member_status.get('operating_status') status_dict[key] = value break break return status_dict def _get_listener_status(self, loadbalancer_id): """Method used to get the status of the listener resource. This method is used to get the status of the listener resources belonged to the specific Load Balancer. :param loadbalancer_id: The ID of the Load Balancer. :returns: The status dictionary of the listener resource. The key is the ID of the listener resource. The value is the operating status of the listener resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # with the corresponding apis, will change to use the new # method to get the status of the listeners. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: key = listener_status.get('id') # If the item has no the property 'id', skip # it. if key is None: continue # The situation that the property # 'operating_status' is none is handled in # the method get_sample() in lbaas.py. value = listener_status.get('operating_status') status_dict[key] = value return status_dict def _get_pool_status(self, loadbalancer_id, parent_id): """Method used to get the status of pool resource. This method is used to get the status of the pool resources belonged to the specific Load Balancer. 
:param loadbalancer_id: The ID of the Load Balancer. :param parent_id: The parent ID of the pool resource. :returns: The status dictionary of the pool resource. The key is the ID of the pool resource. The value is the operating status of the pool resource. """ # FIXME(liamji) the following meters are experimental and # may generate a large load against neutron api. The future # enhancements can be tracked against: # https://review.openstack.org/#/c/218560. # After it has been merged and the neutron client supports # with the corresponding apis, will change to use the new # method to get the status of the pools. resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id) status_tree = resp.get('statuses').get('loadbalancer') status_dict = dict() listeners_status = status_tree.get('listeners') for listener_status in listeners_status: listener_id = listener_status.get('id') if listener_id == parent_id: pools_status = listener_status.get('pools') for pool_status in pools_status: key = pool_status.get('id') # If the item has no the property 'id', skip # it. if key is None: continue # The situation that the property # 'operating_status' is none is handled in # the method get_sample() in lbaas.py. value = pool_status.get('operating_status') status_dict[key] = value break return status_dict @logged def list_listener(self): """This method is used to get the list of the listeners.""" resources = [] if self.lb_version == 'v2': # list_listeners works only with lbaas v2 extension resp = self.client.list_listeners() resources = resp.get('listeners') for listener in resources: loadbalancer_id = listener.get('loadbalancers')[0].get('id') status = self._get_listener_status(loadbalancer_id) listener['operating_status'] = status[listener.get('id')] return resources @logged def list_loadbalancer(self): """This method is used to get the list of the loadbalancers.""" resources = [] if self.lb_version == 'v2': # list_loadbalancers works only with lbaas v2 extension resp = self.client.list_loadbalancers() resources = resp.get('loadbalancers') return resources @logged def get_loadbalancer_stats(self, loadbalancer_id): """This method is used to get the statistics of the loadbalancer. :param loadbalancer_id: the ID of the specified loadbalancer """ resp = self.client.retrieve_loadbalancer_stats(loadbalancer_id) resource = resp.get('stats') return resource ceilometer-10.0.0/ceilometer/__init__.py0000666000175100017510000000117013236733243020214 0ustar zuulzuul00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): pass ceilometer-10.0.0/ceilometer/utils.py0000666000175100017510000000335513236733243017624 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import threading from oslo_concurrency import processutils from oslo_config import cfg ROOTWRAP_CONF = "/etc/ceilometer/rootwrap.conf" OPTS = [ cfg.StrOpt('rootwrap_config', default=ROOTWRAP_CONF, help='Path to the rootwrap configuration file to ' 'use for running commands as root'), ] def _get_root_helper(): global ROOTWRAP_CONF return 'sudo ceilometer-rootwrap %s' % ROOTWRAP_CONF def setup_root_helper(conf): global ROOTWRAP_CONF ROOTWRAP_CONF = conf.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.execute(*cmd, **kwargs) def spawn_thread(target, *args, **kwargs): t = threading.Thread(target=target, args=args, kwargs=kwargs) t.daemon = True t.start() return t ceilometer-10.0.0/ceilometer/network/0000775000175100017510000000000013236733440017572 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/services/0000775000175100017510000000000013236733440021415 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/services/vpnaas.py0000666000175100017510000000620613236733243023266 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
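# NOTE(editorial sketch, not part of the original source): the LBaaS v2
# status tree walked by the _get_member_status()/_get_listener_status()/
# _get_pool_status() helpers in ceilometer/neutron_client.py has roughly this
# nesting (ids and statuses below are illustrative):
#
#   {"statuses": {"loadbalancer": {
#       "listeners": [{"id": "<listener-id>",
#                      "operating_status": "ONLINE",
#                      "pools": [{"id": "<pool-id>",
#                                 "operating_status": "ONLINE",
#                                 "members": [{"id": "<member-id>",
#                                              "operating_status": "ONLINE"}]}]}]}}}
#
# _get_member_status() matches parent_id == [listener_id, pool_id] while
# descending this tree; entries without an "id" are skipped, and a missing
# "operating_status" is handled later in lbaas.py's get_sample().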
from oslo_log import log from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class VPNServicesPollster(base.BaseServicesPollster): """Pollster to capture VPN status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'subnet_id', 'router_id' ] @property def default_discovery(self): return 'vpn_services' def get_samples(self, manager, cache, resources): resources = resources or [] for vpn in resources: LOG.debug("VPN : %s" % vpn) status = self.get_status_id(vpn['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vpn " "%(id)s, skipping sample") % {'stat': vpn['status'], 'id': vpn['id']}) continue yield sample.Sample( name='network.services.vpn', type=sample.TYPE_GAUGE, unit='vpnservice', volume=status, user_id=None, project_id=vpn['tenant_id'], resource_id=vpn['id'], resource_metadata=self.extract_metadata(vpn) ) class IPSecConnectionsPollster(base.BaseServicesPollster): """Pollster to capture vpn ipsec connections status samples.""" FIELDS = ['name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'psk', 'initiator', 'ikepolicy_id', 'dpd', 'ipsecpolicy_id', 'vpnservice_id', 'mtu', 'admin_state_up', 'status', 'tenant_id' ] @property def default_discovery(self): return 'ipsec_connections' def get_samples(self, manager, cache, resources): resources = resources or [] for conn in resources: LOG.debug("IPSec Connection Info: %s" % conn) yield sample.Sample( name='network.services.vpn.connections', type=sample.TYPE_GAUGE, unit='ipsec_site_connection', volume=1, user_id=None, project_id=conn['tenant_id'], resource_id=conn['id'], resource_metadata=self.extract_metadata(conn) ) ceilometer-10.0.0/ceilometer/network/services/lbaas.py0000666000175100017510000003525513236733243023066 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
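# NOTE(editorial sketch, not part of the original source): VPNServicesPollster
# in ceilometer/network/services/vpnaas.py above yields one gauge sample per
# VPN service, using the numeric status as the volume; roughly (field values
# are illustrative):
#
#   sample.Sample(name='network.services.vpn',
#                 type=sample.TYPE_GAUGE,
#                 unit='vpnservice',
#                 volume=1,                  # mapped from vpn['status']
#                 user_id=None,
#                 project_id='<tenant-id>',
#                 resource_id='<vpnservice-id>',
#                 resource_metadata={...})   # keys from the FIELDS list
#
# A status that get_status_id() cannot map (it returns -1) is logged as a
# warning and the sample is skipped rather than published.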
import abc import collections from oslo_log import log import six from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import neutron_client from ceilometer import sample LOG = log.getLogger(__name__) LBStatsData = collections.namedtuple( 'LBStats', ['active_connections', 'total_connections', 'bytes_in', 'bytes_out'] ) LOAD_BALANCER_STATUS_V2 = { 'offline': 0, 'online': 1, 'no_monitor': 3, 'error': 4, 'degraded': 5, 'disabled': 6 } class BaseLBPollster(base.BaseServicesPollster): """Base Class for Load Balancer pollster""" def __init__(self, conf): super(BaseLBPollster, self).__init__(conf) self.lb_version = self.conf.service_types.neutron_lbaas_version def get_load_balancer_status_id(self, value): if self.lb_version == 'v1': resource_status = self.get_status_id(value) elif self.lb_version == 'v2': status = value.lower() resource_status = LOAD_BALANCER_STATUS_V2.get(status, -1) return resource_status class LBPoolPollster(BaseLBPollster): """Pollster to capture Load Balancer pool status samples.""" FIELDS = ['admin_state_up', 'description', 'lb_method', 'name', 'protocol', 'provider', 'status', 'status_description', 'subnet_id', 'vip_id' ] @property def default_discovery(self): return 'lb_pools' def get_samples(self, manager, cache, resources): resources = resources or [] for pool in resources: LOG.debug("Load Balancer Pool : %s" % pool) status = self.get_load_balancer_status_id(pool['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on pool " "%(id)s, skipping sample") % {'stat': pool['status'], 'id': pool['id']}) continue yield sample.Sample( name='network.services.lb.pool', type=sample.TYPE_GAUGE, unit='pool', volume=status, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], resource_metadata=self.extract_metadata(pool) ) class LBVipPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Vip status samples.""" FIELDS = ['admin_state_up', 'address', 'connection_limit', 'description', 'name', 'pool_id', 'port_id', 'protocol', 'protocol_port', 'status', 'status_description', 'subnet_id', 'session_persistence', ] @property def default_discovery(self): return 'lb_vips' def get_samples(self, manager, cache, resources): resources = resources or [] for vip in resources: LOG.debug("Load Balancer Vip : %s" % vip) status = self.get_status_id(vip['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on vip " "%(id)s, skipping sample") % {'stat': vip['status'], 'id': vip['id']}) continue yield sample.Sample( name='network.services.lb.vip', type=sample.TYPE_GAUGE, unit='vip', volume=status, user_id=None, project_id=vip['tenant_id'], resource_id=vip['id'], resource_metadata=self.extract_metadata(vip) ) class LBMemberPollster(BaseLBPollster): """Pollster to capture Load Balancer Member status samples.""" FIELDS = ['admin_state_up', 'address', 'pool_id', 'protocol_port', 'status', 'status_description', 'weight', ] @property def default_discovery(self): return 'lb_members' def get_samples(self, manager, cache, resources): resources = resources or [] for member in resources: LOG.debug("Load Balancer Member : %s" % member) status = self.get_load_balancer_status_id(member['status']) if status == -1: LOG.warning(_("Unknown status %(stat)s received on member " "%(id)s, skipping sample") % {'stat': member['status'], 'id': member['id']}) continue yield sample.Sample( name='network.services.lb.member', type=sample.TYPE_GAUGE, 
unit='member', volume=status, user_id=None, project_id=member['tenant_id'], resource_id=member['id'], resource_metadata=self.extract_metadata(member) ) class LBHealthMonitorPollster(base.BaseServicesPollster): """Pollster to capture Load Balancer Health probes status samples.""" FIELDS = ['admin_state_up', 'delay', 'max_retries', 'pools', 'timeout', 'type' ] @property def default_discovery(self): return 'lb_health_probes' def get_samples(self, manager, cache, resources): for probe in resources: LOG.debug("Load Balancer Health probe : %s" % probe) yield sample.Sample( name='network.services.lb.health_monitor', type=sample.TYPE_GAUGE, unit='health_monitor', volume=1, user_id=None, project_id=probe['tenant_id'], resource_id=probe['id'], resource_metadata=self.extract_metadata(probe) ) @six.add_metaclass(abc.ABCMeta) class _LBStatsPollster(base.BaseServicesPollster): """Base Statistics pollster. It is capturing the statistics info and yielding samples for connections and bandwidth. """ def __init__(self, conf): super(_LBStatsPollster, self).__init__(conf) self.client = neutron_client.Client(self.conf) self.lb_version = self.conf.service_types.neutron_lbaas_version @staticmethod def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): if not resource_metadata: resource_metadata = {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], resource_metadata=resource_metadata, ) def _populate_stats_cache(self, pool_id, cache): i_cache = cache.setdefault("lbstats", {}) if pool_id not in i_cache: stats = self.client.pool_stats(pool_id)['stats'] i_cache[pool_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[pool_id] def _populate_stats_cache_v2(self, loadbalancer_id, cache): i_cache = cache.setdefault("lbstats", {}) if loadbalancer_id not in i_cache: stats = self.client.get_loadbalancer_stats(loadbalancer_id) i_cache[loadbalancer_id] = LBStatsData( active_connections=stats['active_connections'], total_connections=stats['total_connections'], bytes_in=stats['bytes_in'], bytes_out=stats['bytes_out'], ) return i_cache[loadbalancer_id] @property def default_discovery(self): discovery_resource = 'lb_pools' if self.lb_version == 'v2': discovery_resource = 'lb_loadbalancers' return discovery_resource @abc.abstractmethod def _get_sample(pool, c_data): """Return one Sample.""" def get_samples(self, manager, cache, resources): if self.lb_version == 'v1': for pool in resources: try: c_data = self._populate_stats_cache(pool['id'], cache) yield self._get_sample(pool, c_data) except Exception: LOG.exception('Ignoring pool %(pool_id)s', {'pool_id': pool['id']}) elif self.lb_version == 'v2': for loadbalancer in resources: try: c_data = self._populate_stats_cache_v2(loadbalancer['id'], cache) yield self._get_sample(loadbalancer, c_data) except Exception: LOG.exception( 'Ignoring loadbalancer %(loadbalancer_id)s', {'loadbalancer_id': loadbalancer['id']}) class LBActiveConnectionsPollster(_LBStatsPollster): """Pollster to capture Active Load Balancer connections.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.active.connections', type=sample.TYPE_GAUGE, unit='connection', volume=data.active_connections, ) class LBTotalConnectionsPollster(_LBStatsPollster): """Pollster to capture Total Load Balancer connections.""" 
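# NOTE(editorial sketch, not part of the original class): the statistics
# cached by _LBStatsPollster above are the raw neutron counters, e.g.
#
#   {"active_connections": 2, "total_connections": 100,
#    "bytes_in": 1024, "bytes_out": 2048}
#
# wrapped into the LBStatsData namedtuple and stored under the "lbstats" key
# of the cache passed to get_samples(), so the same pool (v1) or
# loadbalancer (v2) is not queried twice while that cache is shared.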
@staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.total.connections', type=sample.TYPE_CUMULATIVE, unit='connection', volume=data.total_connections, ) class LBBytesInPollster(_LBStatsPollster): """Pollster to capture incoming bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.incoming.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_in, ) class LBBytesOutPollster(_LBStatsPollster): """Pollster to capture outgoing bytes.""" @staticmethod def _get_sample(pool, data): return make_sample_from_pool( pool, name='network.services.lb.outgoing.bytes', type=sample.TYPE_GAUGE, unit='B', volume=data.bytes_out, ) def make_sample_from_pool(pool, name, type, unit, volume, resource_metadata=None): resource_metadata = resource_metadata or {} return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=None, project_id=pool['tenant_id'], resource_id=pool['id'], resource_metadata=resource_metadata, ) class LBListenerPollster(BaseLBPollster): """Pollster to capture Load Balancer Listener status samples.""" FIELDS = ['admin_state_up', 'connection_limit', 'description', 'name', 'default_pool_id', 'protocol', 'protocol_port', 'operating_status', 'loadbalancers' ] @property def default_discovery(self): return 'lb_listeners' def get_samples(self, manager, cache, resources): resources = resources or [] for listener in resources: LOG.debug("Load Balancer Listener : %s" % listener) status = self.get_load_balancer_status_id( listener['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on listener " "%(id)s, skipping sample") % {'stat': listener['operating_status'], 'id': listener['id']}) continue yield sample.Sample( name='network.services.lb.listener', type=sample.TYPE_GAUGE, unit='listener', volume=status, user_id=None, project_id=listener['tenant_id'], resource_id=listener['id'], resource_metadata=self.extract_metadata(listener) ) class LBLoadBalancerPollster(BaseLBPollster): """Pollster to capture Load Balancer status samples.""" FIELDS = ['admin_state_up', 'description', 'vip_address', 'listeners', 'name', 'vip_subnet_id', 'operating_status', ] @property def default_discovery(self): return 'lb_loadbalancers' def get_samples(self, manager, cache, resources): resources = resources or [] for loadbalancer in resources: LOG.debug("Load Balancer: %s" % loadbalancer) status = self.get_load_balancer_status_id( loadbalancer['operating_status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received " "on Load Balancer " "%(id)s, skipping sample") % {'stat': loadbalancer['operating_status'], 'id': loadbalancer['id']}) continue yield sample.Sample( name='network.services.lb.loadbalancer', type=sample.TYPE_GAUGE, unit='loadbalancer', volume=status, user_id=None, project_id=loadbalancer['tenant_id'], resource_id=loadbalancer['id'], resource_metadata=self.extract_metadata(loadbalancer) ) ceilometer-10.0.0/ceilometer/network/services/discovery.py0000666000175100017510000000736313236733243024012 0ustar zuulzuul00000000000000# # Copyright (c) 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import neutron_client from ceilometer.polling import plugin_base class _BaseServicesDiscovery(plugin_base.DiscoveryBase): KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' def __init__(self, conf): super(_BaseServicesDiscovery, self).__init__(conf) self.neutron_cli = neutron_client.Client(conf) class LBPoolsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" pools = self.neutron_cli.pool_get_all() return [i for i in pools if i.get('status') != 'error'] class LBVipsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vips = self.neutron_cli.vip_get_all() return [i for i in vips if i.get('status', None) != 'error'] class LBMembersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" members = self.neutron_cli.member_get_all() return [i for i in members if i.get('status', None) != 'error'] class LBListenersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer listener resources to monitor.""" listeners = self.neutron_cli.list_listener() return [i for i in listeners if i.get('operating_status', None) != 'error'] class LBLoadBalancersDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover load balancer resources to monitor.""" loadbalancers = self.neutron_cli.list_loadbalancer() return [i for i in loadbalancers if i.get('operating_status', None) != 'error'] class LBHealthMonitorsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" probes = self.neutron_cli.health_monitor_get_all() return probes class VPNServicesDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" vpnservices = self.neutron_cli.vpn_get_all() return [i for i in vpnservices if i.get('status', None) != 'error'] class IPSecConnectionsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" conns = self.neutron_cli.ipsec_site_connections_get_all() return conns class FirewallDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" fw = self.neutron_cli.firewall_get_all() return [i for i in fw if i.get('status', None) != 'error'] class FirewallPolicyDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" return self.neutron_cli.fw_policy_get_all() class FloatingIPDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover floating IP resources to monitor.""" return self.neutron_cli.fip_get_all() ceilometer-10.0.0/ceilometer/network/services/fwaas.py0000666000175100017510000000554713236733243023106 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FirewallPollster(base.BaseServicesPollster): """Pollster to capture firewalls status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'firewall_policy_id', ] @property def default_discovery(self): return 'fw_services' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall : %s" % fw) status = self.get_status_id(fw['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on fw %(id)s," "skipping sample") % {'stat': fw['status'], 'id': fw['id']}) continue yield sample.Sample( name='network.services.firewall', type=sample.TYPE_GAUGE, unit='firewall', volume=status, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], resource_metadata=self.extract_metadata(fw) ) class FirewallPolicyPollster(base.BaseServicesPollster): """Pollster to capture firewall policy samples.""" FIELDS = ['name', 'description', 'name', 'firewall_rules', 'shared', 'audited', ] @property def default_discovery(self): return 'fw_policy' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall Policy: %s" % fw) yield sample.Sample( name='network.services.firewall.policy', type=sample.TYPE_GAUGE, unit='firewall_policy', volume=1, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], resource_metadata=self.extract_metadata(fw) ) ceilometer-10.0.0/ceilometer/network/services/__init__.py0000666000175100017510000000000013236733243023517 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/services/base.py0000666000175100017510000000217013236733243022704 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
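# The STATUS table below converts a Neutron resource status string into the
# integer volume carried by the service pollsters' gauge samples; lookups
# are case-insensitive and unknown states map to -1 so callers can skip the
# sample.  Doctest-style sketch:
#
#   >>> BaseServicesPollster.get_status_id('ACTIVE')
#   1
#   >>> BaseServicesPollster.get_status_id('ERROR')
#   7
#   >>> BaseServicesPollster.get_status_id('not-a-status')
#   -1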
from ceilometer.polling import plugin_base # status map for converting metric status to volume int STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, 'down': 3, 'created': 4, 'pending_update': 5, 'pending_delete': 6, 'error': 7, } class BaseServicesPollster(plugin_base.PollsterBase): FIELDS = [] def extract_metadata(self, metric): return dict((k, metric[k]) for k in self.FIELDS) @staticmethod def get_status_id(value): status = value.lower() return STATUS.get(status, -1) ceilometer-10.0.0/ceilometer/network/floatingip.py0000666000175100017510000000354513236733243022312 0ustar zuulzuul00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # Copyright 2012 eNovance # Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FloatingIPPollster(base.BaseServicesPollster): FIELDS = ['router_id', 'status', 'floating_network_id', 'fixed_ip_address', 'port_id', 'floating_ip_address', ] @property def default_discovery(self): return 'fip_services' def get_samples(self, manager, cache, resources): for fip in resources or []: if fip['status'] is None: LOG.warning("Invalid status, skipping IP address %s" % fip['floating_ip_address']) continue status = self.get_status_id(fip['status']) yield sample.Sample( name='ip.floating', type=sample.TYPE_GAUGE, unit='ip', volume=status, user_id=fip.get('user_id'), project_id=fip['tenant_id'], resource_id=fip['id'], resource_metadata=self.extract_metadata(fip) ) ceilometer-10.0.0/ceilometer/network/__init__.py0000666000175100017510000000000013236733243021674 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/0000775000175100017510000000000013236733440021764 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/flow.py0000666000175100017510000000263413236733243023315 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
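# Every pollster in this module (and in port.py, switch.py and table.py)
# only declares meter_name, meter_type and meter_unit.  The shared
# statistics._Base.get_samples() does the rest: it parses each resource URL
# configured for the pollster, loads the driver registered for the URL
# scheme in the 'network.statistics.drivers' namespace, and turns every
# (volume, resource_id, resource_metadata, project_id) tuple returned by
# the driver into a Sample, e.g. (illustrative values)
#
#   sample.Sample(name='switch.flow', type=sample.TYPE_GAUGE, unit='flow',
#                 volume=1, resource_id='00:00:00:00:00:00:00:02', ...)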
from ceilometer.network import statistics from ceilometer import sample class FlowPollster(statistics._Base): meter_name = 'switch.flow' meter_type = sample.TYPE_GAUGE meter_unit = 'flow' class FlowPollsterDurationSeconds(statistics._Base): meter_name = 'switch.flow.duration_seconds' meter_type = sample.TYPE_GAUGE meter_unit = 's' class FlowPollsterDurationNanoseconds(statistics._Base): meter_name = 'switch.flow.duration_nanoseconds' meter_type = sample.TYPE_GAUGE meter_unit = 'ns' class FlowPollsterPackets(statistics._Base): meter_name = 'switch.flow.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class FlowPollsterBytes(statistics._Base): meter_name = 'switch.flow.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' ceilometer-10.0.0/ceilometer/network/statistics/opendaylight/0000775000175100017510000000000013236733440024453 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/opendaylight/client.py0000666000175100017510000001421413236733243026310 0ustar zuulzuul00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log import requests from requests import auth import six from ceilometer.i18n import _ LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(object): """Base class of OpenDaylight REST APIs Clients.""" @abc.abstractproperty def base_url(self): """Returns base url for each REST API.""" def __init__(self, client): self.client = client def request(self, path, container_name): return self.client.request(self.base_url + path, container_name) class OpenDaylightRESTAPIFailed(Exception): pass class StatisticsAPIClient(_Base): """OpenDaylight Statistics REST API Client Base URL: {endpoint}/statistics/{containerName} """ base_url = '/statistics/%(container_name)s' def get_port_statistics(self, container_name): """Get port statistics URL: {Base URL}/port """ return self.request('/port', container_name) def get_flow_statistics(self, container_name): """Get flow statistics URL: {Base URL}/flow """ return self.request('/flow', container_name) def get_table_statistics(self, container_name): """Get table statistics URL: {Base URL}/table """ return self.request('/table', container_name) class TopologyAPIClient(_Base): """OpenDaylight Topology REST API Client Base URL: {endpoint}/topology/{containerName} """ base_url = '/topology/%(container_name)s' def get_topology(self, container_name): """Get topology URL: {Base URL} """ return self.request('', container_name) def get_user_links(self, container_name): """Get user links URL: {Base URL}/userLinks """ return self.request('/userLinks', container_name) class SwitchManagerAPIClient(_Base): """OpenDaylight Switch Manager REST API Client Base URL: {endpoint}/switchmanager/{containerName} """ base_url = '/switchmanager/%(container_name)s' def get_nodes(self, container_name): """Get node information URL: {Base URL}/nodes """ return self.request('/nodes', container_name) class 
HostTrackerAPIClient(_Base): """OpenDaylight Host Tracker REST API Client Base URL: {endpoint}/hosttracker/{containerName} """ base_url = '/hosttracker/%(container_name)s' def get_active_hosts(self, container_name): """Get active hosts information URL: {Base URL}/hosts/active """ return self.request('/hosts/active', container_name) def get_inactive_hosts(self, container_name): """Get inactive hosts information URL: {Base URL}/hosts/inactive """ return self.request('/hosts/inactive', container_name) class Client(object): def __init__(self, conf, endpoint, params): self.statistics = StatisticsAPIClient(self) self.topology = TopologyAPIClient(self) self.switch_manager = SwitchManagerAPIClient(self) self.host_tracker = HostTrackerAPIClient(self) self._endpoint = endpoint self.conf = conf self._req_params = self._get_req_params(params) def _get_req_params(self, params): req_params = { 'headers': { 'Accept': 'application/json' }, 'timeout': self.conf.http_timeout, } auth_way = params.get('auth') if auth_way in ['basic', 'digest']: user = params.get('user') password = params.get('password') if auth_way == 'basic': auth_class = auth.HTTPBasicAuth else: auth_class = auth.HTTPDigestAuth req_params['auth'] = auth_class(user, password) return req_params def _log_req(self, url): curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)] if 'auth' in self._req_params: auth_class = self._req_params['auth'] if isinstance(auth_class, auth.HTTPBasicAuth): curl_command.append('--basic ') else: curl_command.append('--digest ') curl_command.append('--user "%s":"***" ' % auth_class.username) for name, value in six.iteritems(self._req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) @staticmethod def _log_res(resp): dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) def _http_request(self, url): if self.conf.debug: self._log_req(url) resp = requests.get(url, **self._req_params) if self.conf.debug: self._log_res(resp) if resp.status_code // 100 != 2: raise OpenDaylightRESTAPIFailed( _('OpenDaylight API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp.json() def request(self, path, container_name): url = self._endpoint + path % {'container_name': container_name} return self._http_request(url) ceilometer-10.0.0/ceilometer/network/statistics/opendaylight/__init__.py0000666000175100017510000000000013236733243026555 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/opendaylight/driver.py0000666000175100017510000004375113236733243026335 0ustar zuulzuul00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
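# Sketch of how this driver uses the REST client defined in client.py
# (endpoint and credentials are the example values from the docstring of
# OpenDayLightDriver below):
#
#   cs = client.Client(conf, 'http://127.0.0.1:8080/controller/nb/v2',
#                      {'auth': 'basic', 'user': 'admin', 'password': 'admin'})
#   flows = cs.statistics.get_flow_statistics('default')
#   # GET http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow
#
# _prepare_cache() issues one such set of requests per container name and
# stores the decoded JSON under cache['network.statistics.opendaylight'].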
from oslo_log import log import six from six import moves from six.moves.urllib import parse as urlparse from ceilometer.network.statistics import driver from ceilometer.network.statistics.opendaylight import client LOG = log.getLogger(__name__) def _get_properties(properties, prefix='properties'): resource_meta = {} if properties is not None: for k, v in six.iteritems(properties): value = v['value'] key = prefix + '_' + k if 'name' in v: key += '_' + v['name'] resource_meta[key] = value return resource_meta def _get_int_sample(key, statistic, resource_id, resource_meta): if key not in statistic: return None return int(statistic[key]), resource_id, resource_meta class OpenDayLightDriver(driver.Driver): """Driver of network info collector from OpenDaylight. This driver uses resources in "pipeline.yaml". Resource requires below conditions: * resource is url * scheme is "opendaylight" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of request url to OpenDaylight REST API endpoint. (default http) * auth: Auth strategy of http. This parameter can be set basic and digest.(default None) * user: This is username that is used by auth.(default None) * password: This is password that is used by auth.(default None) * container_name: Name of container of OpenDaylight.(default "default") This parameter allows multi values. e.g.:: opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default& container_name=egg&auth=basic&user=admin&password=admin&scheme=http In this case, the driver send request to below URLs: http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow http://127.0.0.1:8080/controller/nb/v2/statistics/egg/flow """ def _prepare_cache(self, endpoint, params, cache): if 'network.statistics.opendaylight' in cache: return cache['network.statistics.opendaylight'] data = {} container_names = params.get('container_name', ['default']) odl_params = {} if 'auth' in params: odl_params['auth'] = params['auth'][0] if 'user' in params: odl_params['user'] = params['user'][0] if 'password' in params: odl_params['password'] = params['password'][0] cs = client.Client(self.conf, endpoint, odl_params) for container_name in container_names: try: container_data = {} # get flow statistics container_data['flow'] = cs.statistics.get_flow_statistics( container_name) # get port statistics container_data['port'] = cs.statistics.get_port_statistics( container_name) # get table statistics container_data['table'] = cs.statistics.get_table_statistics( container_name) # get topology container_data['topology'] = cs.topology.get_topology( container_name) # get switch information container_data['switch'] = cs.switch_manager.get_nodes( container_name) # get and optimize user links # e.g. 
# before: # "OF|2@OF|00:00:00:00:00:00:00:02" # after: # { # 'port': { # 'type': 'OF', # 'id': '2'}, # 'node': { # 'type': 'OF', # 'id': '00:00:00:00:00:00:00:02' # } # } user_links_raw = cs.topology.get_user_links(container_name) user_links = [] container_data['user_links'] = user_links for user_link_row in user_links_raw['userLinks']: user_link = {} for k, v in six.iteritems(user_link_row): if (k == "dstNodeConnector" or k == "srcNodeConnector"): port_raw, node_raw = v.split('@') port = {} port['type'], port['id'] = port_raw.split('|') node = {} node['type'], node['id'] = node_raw.split('|') v = {'port': port, 'node': node} user_link[k] = v user_links.append(user_link) # get link status to hosts container_data['active_hosts'] = ( cs.host_tracker.get_active_hosts(container_name)) container_data['inactive_hosts'] = ( cs.host_tracker.get_inactive_hosts(container_name)) data[container_name] = container_data except Exception: LOG.exception('Request failed to connect to OpenDaylight' ' with NorthBound REST API') cache['network.statistics.opendaylight'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): extractor = self._get_extractor(meter_name) if extractor is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. return None iter = self._get_iter(meter_name) if iter is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. return None parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) data = self._prepare_cache(endpoint, params, cache) samples = [] for name, value in six.iteritems(data): for sample in iter(extractor, value): if sample is not None: # set controller name and container name # to resource_metadata sample[2]['controller'] = 'OpenDaylight' sample[2]['container'] = name samples.append(sample + (None, )) return samples def _get_iter(self, meter_name): if meter_name == 'switch': return self._iter_switch elif meter_name.startswith('switch.flow'): return self._iter_flow elif meter_name.startswith('switch.table'): return self._iter_table elif meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _iter_switch(extractor, data): for switch in data['switch']['nodeProperties']: yield extractor(switch, switch['node']['id'], {}) @staticmethod def _switch(statistic, resource_id, resource_meta): resource_meta.update(_get_properties(statistic.get('properties'))) return 1, resource_id, resource_meta @staticmethod def _iter_port(extractor, data): for port_statistic in data['port']['portStatistics']: for statistic in port_statistic['portStatistic']: resource_meta = {'port': statistic['nodeConnector']['id']} yield extractor(statistic, port_statistic['node']['id'], resource_meta, data) @staticmethod def _switch_port(statistic, resource_id, resource_meta, data): my_node_id = resource_id my_port_id = statistic['nodeConnector']['id'] # link status from topology edge_properties = data['topology']['edgeProperties'] for edge_property in edge_properties: edge = edge_property['edge'] if (edge['headNodeConnector']['node']['id'] == my_node_id and edge['headNodeConnector']['id'] == my_port_id): target_node = edge['tailNodeConnector'] elif (edge['tailNodeConnector']['node']['id'] == my_node_id 
and edge['tailNodeConnector']['id'] == my_port_id): target_node = edge['headNodeConnector'] else: continue resource_meta['topology_node_id'] = target_node['node']['id'] resource_meta['topology_node_port'] = target_node['id'] resource_meta.update(_get_properties( edge_property.get('properties'), prefix='topology')) break # link status from user links for user_link in data['user_links']: if (user_link['dstNodeConnector']['node']['id'] == my_node_id and user_link['dstNodeConnector']['port']['id'] == my_port_id): target_node = user_link['srcNodeConnector'] elif (user_link['srcNodeConnector']['node']['id'] == my_node_id and user_link['srcNodeConnector']['port']['id'] == my_port_id): target_node = user_link['dstNodeConnector'] else: continue resource_meta['user_link_node_id'] = target_node['node']['id'] resource_meta['user_link_node_port'] = target_node['port']['id'] resource_meta['user_link_status'] = user_link['status'] resource_meta['user_link_name'] = user_link['name'] break # link status to hosts for hosts, status in moves.zip( [data['active_hosts'], data['inactive_hosts']], ['active', 'inactive']): for host_config in hosts['hostConfig']: if (host_config['nodeId'] != my_node_id or host_config['nodeConnectorId'] != my_port_id): continue resource_meta['host_status'] = status for key in ['dataLayerAddress', 'vlan', 'staticHost', 'networkAddress']: if key in host_config: resource_meta['host_' + key] = host_config[key] break return 1, resource_id, resource_meta @staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('receivePackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitPackets', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitBytes', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_drops(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitDrops', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_transmit_errors(statistic, resource_id, resource_meta, data): return _get_int_sample('transmitErrors', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_frame_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveFrameError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_overrun_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveOverRunError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_receive_crc_error(statistic, resource_id, resource_meta, data): return _get_int_sample('receiveCrcError', statistic, resource_id, resource_meta) @staticmethod def _switch_port_collision_count(statistic, resource_id, resource_meta, data): return _get_int_sample('collisionCount', statistic, resource_id, 
resource_meta) @staticmethod def _iter_table(extractor, data): for table_statistic in data['table']['tableStatistics']: for statistic in table_statistic['tableStatistic']: resource_meta = {'table_id': statistic['nodeTable']['id']} yield extractor(statistic, table_statistic['node']['id'], resource_meta) @staticmethod def _switch_table(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def _switch_table_active_entries(statistic, resource_id, resource_meta): return _get_int_sample('activeCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_lookup_packets(statistic, resource_id, resource_meta): return _get_int_sample('lookupCount', statistic, resource_id, resource_meta) @staticmethod def _switch_table_matched_packets(statistic, resource_id, resource_meta): return _get_int_sample('matchedCount', statistic, resource_id, resource_meta) def dict_to_keyval(self, value, key_base=None): """Expand a given dict to its corresponding key-value pairs. Generated keys are fully qualified, delimited using dot notation. ie. key = 'key.child_key.grandchild_key[0]' """ val_iter, key_func = None, None if isinstance(value, dict): val_iter = six.iteritems(value) key_func = lambda k: key_base + '.' + k if key_base else k elif isinstance(value, (tuple, list)): val_iter = enumerate(value) key_func = lambda k: key_base + '[%d]' % k if val_iter: for k, v in val_iter: key_gen = key_func(k) if isinstance(v, dict) or isinstance(v, (tuple, list)): for key_gen, v in self.dict_to_keyval(v, key_gen): yield key_gen, v else: yield key_gen, v def _iter_flow(self, extractor, data): for flow_statistic in data['flow']['flowStatistics']: for statistic in flow_statistic['flowStatistic']: resource_meta = {'flow_id': statistic['flow']['id'], 'table_id': statistic['tableId']} for key, value in self.dict_to_keyval(statistic['flow'], 'flow'): resource_meta[key.replace('.', '_')] = value yield extractor(statistic, flow_statistic['node']['id'], resource_meta) @staticmethod def _switch_flow(statistic, resource_id, resource_meta): return 1, resource_id, resource_meta @staticmethod def _switch_flow_duration_seconds(statistic, resource_id, resource_meta): return _get_int_sample('durationSeconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_duration_nanoseconds(statistic, resource_id, resource_meta): return _get_int_sample('durationNanoseconds', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_packets(statistic, resource_id, resource_meta): return _get_int_sample('packetCount', statistic, resource_id, resource_meta) @staticmethod def _switch_flow_bytes(statistic, resource_id, resource_meta): return _get_int_sample('byteCount', statistic, resource_id, resource_meta) ceilometer-10.0.0/ceilometer/network/statistics/opencontrail/0000775000175100017510000000000013236733440024461 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/opencontrail/client.py0000666000175100017510000000677513236733243026333 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log import requests import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ LOG = log.getLogger(__name__) class OpencontrailAPIFailed(Exception): pass class AnalyticsAPIBaseClient(object): """Opencontrail Base Statistics REST API Client.""" def __init__(self, conf, endpoint, data): self.conf = conf self.endpoint = endpoint self.data = data or {} def request(self, path, fqdn_uuid, data=None): req_data = copy.copy(self.data) if data: req_data.update(data) req_params = self._get_req_params(data=req_data) url = urlparse.urljoin(self.endpoint, path + fqdn_uuid) self._log_req(url, req_params) resp = requests.get(url, **req_params) self._log_res(resp) if resp.status_code != 200: raise OpencontrailAPIFailed( _('Opencontrail API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp def _get_req_params(self, data=None): req_params = { 'headers': { 'Accept': 'application/json' }, 'data': data, 'allow_redirects': False, 'timeout': self.conf.http_timeout, } return req_params def _log_req(self, url, req_params): if not self.conf.debug: return curl_command = ['REQ: curl -i -X GET '] params = [] for name, value in six.iteritems(req_params['data']): params.append("%s=%s" % (name, value)) curl_command.append('"%s?%s" ' % (url, '&'.join(params))) for name, value in six.iteritems(req_params['headers']): curl_command.append('-H "%s: %s" ' % (name, value)) LOG.debug(''.join(curl_command)) def _log_res(self, resp): if not self.conf.debug: return dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) class NetworksAPIClient(AnalyticsAPIBaseClient): """Opencontrail Statistics REST API Client.""" def get_vm_statistics(self, fqdn_uuid, data=None): """Get statistics of a virtual-machines. URL: {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid} """ path = '/analytics/uves/virtual-machine/' resp = self.request(path, fqdn_uuid, data) return resp.json() class Client(object): def __init__(self, conf, endpoint, data=None): self.networks = NetworksAPIClient(conf, endpoint, data) ceilometer-10.0.0/ceilometer/network/statistics/opencontrail/__init__.py0000666000175100017510000000000013236733243026563 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/network/statistics/opencontrail/driver.py0000666000175100017510000001545413236733243026342 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
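# Sketch of the analytics call this driver performs (the endpoint comes from
# the docstring example below, '*' is the default fqdn_uuid):
#
#   c = client.Client(conf, 'http://localhost:8081')
#   stats = c.networks.get_vm_statistics('*')
#   # GET http://localhost:8081/analytics/uves/virtual-machine/*
#
# Each element of stats['value'] is then walked by _iter_port() together
# with the Neutron port list so samples can be tagged with the project,
# network and device owner of the matching port.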
import re from six.moves.urllib import parse as urlparse from ceilometer.network.statistics import driver from ceilometer.network.statistics.opencontrail import client from ceilometer import neutron_client class OpencontrailDriver(driver.Driver): """Driver of network analytics of Opencontrail. This driver uses resources in "pipeline.yaml". Resource requires below conditions: * resource is url * scheme is "opencontrail" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of request url to Opencontrail Analytics endpoint. (default "http") * virtual_network Specify the virtual network. (default None) * fqdn_uuid: Specify the VM fqdn UUID. (default "*") * resource: The resource on which the counters are retrieved. (default "if_stats_list") * fip_stats_list: Traffic on floating ips * if_stats_list: Traffic on VM interfaces e.g.:: opencontrail://localhost:8081/?resource=fip_stats_list& virtual_network=default-domain:openstack:public """ def _prepare_cache(self, endpoint, params, cache): if 'network.statistics.opencontrail' in cache: return cache['network.statistics.opencontrail'] data = { 'o_client': client.Client(self.conf, endpoint), 'n_client': neutron_client.Client(self.conf) } cache['network.statistics.opencontrail'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) iter = self._get_iter(meter_name) if iter is None: # The extractor for this meter is not implemented or the API # doesn't have method to get this meter. return extractor = self._get_extractor(meter_name) if extractor is None: # The extractor for this meter is not implemented or the API # doesn't have method to get this meter. 
return data = self._prepare_cache(endpoint, params, cache) ports = data['n_client'].port_get_all() ports_map = dict((port['id'], port) for port in ports) resource = params.get('resource', ['if_stats_list'])[0] fqdn_uuid = params.get('fqdn_uuid', ['*'])[0] virtual_network = params.get('virtual_network', [None])[0] statistics = data['o_client'].networks.get_vm_statistics(fqdn_uuid) if not statistics: return for value in statistics['value']: for sample in iter(extractor, value, ports_map, resource, virtual_network): if sample is not None: yield sample + (None, ) def _get_iter(self, meter_name): if meter_name.startswith('switch.port'): return self._iter_port def _get_extractor(self, meter_name): method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _explode_name(fq_name): m = re.match( "(?P[^:]+):(?P.+):(?P[^:]+)", fq_name) if not m: return return m.group('domain'), m.group('project'), m.group('port_id') @staticmethod def _get_resource_meta(ports_map, stat, resource, network): if resource == 'fip_stats_list': if network and (network != stat['virtual_network']): return name = stat['iface_name'] else: name = stat['name'] domain, project, port_id = OpencontrailDriver._explode_name(name) port = ports_map.get(port_id) tenant_id = None network_id = None device_owner_id = None if port: tenant_id = port['tenant_id'] network_id = port['network_id'] device_owner_id = port['device_id'] resource_meta = {'device_owner_id': device_owner_id, 'network_id': network_id, 'project_id': tenant_id, 'project': project, 'resource': resource, 'domain': domain} return port_id, resource_meta @staticmethod def _iter_port(extractor, value, ports_map, resource, virtual_network=None): stats = value['value']['UveVirtualMachineAgent'].get(resource, []) for stat in stats: if type(stat) is list: for sub_stats, node in zip(*[iter(stat)] * 2): for sub_stat in sub_stats: result = OpencontrailDriver._get_resource_meta( ports_map, sub_stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(sub_stat, port_id, resource_meta) else: result = OpencontrailDriver._get_resource_meta( ports_map, stat, resource, virtual_network) if not result: continue port_id, resource_meta = result yield extractor(stat, port_id, resource_meta) @staticmethod def _switch_port_receive_packets(statistic, resource_id, resource_meta): return int(statistic['in_pkts']), resource_id, resource_meta @staticmethod def _switch_port_transmit_packets(statistic, resource_id, resource_meta): return int(statistic['out_pkts']), resource_id, resource_meta @staticmethod def _switch_port_receive_bytes(statistic, resource_id, resource_meta): return int(statistic['in_bytes']), resource_id, resource_meta @staticmethod def _switch_port_transmit_bytes(statistic, resource_id, resource_meta): return int(statistic['out_bytes']), resource_id, resource_meta ceilometer-10.0.0/ceilometer/network/statistics/switch.py0000666000175100017510000000166113236733243023646 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class SWPollster(statistics._Base): meter_name = 'switch' meter_type = sample.TYPE_GAUGE meter_unit = 'switch' class SwitchPollsterPorts(statistics._Base): meter_name = 'switch.ports' meter_type = sample.TYPE_GAUGE meter_unit = 'ports' ceilometer-10.0.0/ceilometer/network/statistics/__init__.py0000666000175100017510000000654213236733243024107 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_utils import netutils import six from six.moves.urllib import parse as urlparse from stevedore import driver as _driver from ceilometer.polling import plugin_base from ceilometer import sample @six.add_metaclass(abc.ABCMeta) class _Base(plugin_base.PollsterBase): NAMESPACE = 'network.statistics.drivers' drivers = {} @property def default_discovery(self): # this signifies that the pollster gets its resources from # elsewhere, in this case they're manually listed in the # pipeline configuration return None @abc.abstractproperty def meter_name(self): """Return a Meter Name.""" @abc.abstractproperty def meter_type(self): """Return a Meter Type.""" @abc.abstractproperty def meter_unit(self): """Return a Meter Unit.""" @staticmethod def _parse_my_resource(resource): parse_url = netutils.urlsplit(resource) params = urlparse.parse_qs(parse_url.query) parts = urlparse.ParseResult(parse_url.scheme, parse_url.netloc, parse_url.path, None, None, None) return parts, params @staticmethod def get_driver(conf, scheme): if scheme not in _Base.drivers: _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE, scheme).driver(conf) return _Base.drivers[scheme] def get_samples(self, manager, cache, resources): resources = resources or [] for resource in resources: parse_url, params = self._parse_my_resource(resource) ext = self.get_driver(self.conf, parse_url.scheme) sample_data = ext.get_sample_data(self.meter_name, parse_url, params, cache) for data in sample_data or []: if data is None: continue if not isinstance(data, list): data = [data] for (volume, resource_id, resource_metadata, project_id) in data: yield sample.Sample( name=self.meter_name, type=self.meter_type, unit=self.meter_unit, volume=volume, user_id=None, project_id=project_id, resource_id=resource_id, resource_metadata=resource_metadata ) ceilometer-10.0.0/ceilometer/network/statistics/port.py0000666000175100017510000000563113236733243023332 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class PortPollster(statistics._Base): meter_name = 'switch.port' meter_type = sample.TYPE_GAUGE meter_unit = 'port' class PortPollsterUptime(statistics._Base): meter_name = 'switch.port.uptime' meter_type = sample.TYPE_GAUGE meter_unit = 's' class PortPollsterReceivePackets(statistics._Base): meter_name = 'switch.port.receive.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitPackets(statistics._Base): meter_name = 'switch.port.transmit.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveBytes(statistics._Base): meter_name = 'switch.port.receive.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterTransmitBytes(statistics._Base): meter_name = 'switch.port.transmit.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterReceiveDrops(statistics._Base): meter_name = 'switch.port.receive.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitDrops(statistics._Base): meter_name = 'switch.port.transmit.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveErrors(statistics._Base): meter_name = 'switch.port.receive.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitErrors(statistics._Base): meter_name = 'switch.port.transmit.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveFrameErrors(statistics._Base): meter_name = 'switch.port.receive.frame_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveOverrunErrors(statistics._Base): meter_name = 'switch.port.receive.overrun_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveCRCErrors(statistics._Base): meter_name = 'switch.port.receive.crc_error' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterCollisionCount(statistics._Base): meter_name = 'switch.port.collision.count' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' ceilometer-10.0.0/ceilometer/network/statistics/driver.py0000666000175100017510000000172513236733243023641 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
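# Minimal sketch of a concrete statistics driver (the class name, scheme and
# returned values are hypothetical; only the interface is taken from the
# abstract Driver below):
#
#   class ExampleDriver(Driver):
#       def get_sample_data(self, meter_name, parse_url, params, cache):
#           if meter_name != 'switch':
#               return None          # meter not handled by this driver
#           # statistics._Base.get_samples() unpacks each 4-tuple as
#           # (volume, resource_id, resource_metadata, project_id)
#           return [(1, 'switch-01', {'controller': 'example'}, None)]
#
# Concrete drivers are resolved through the 'network.statistics.drivers'
# entry-point namespace using the scheme of the configured resource URL.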
import abc import six @six.add_metaclass(abc.ABCMeta) class Driver(object): def __init__(self, conf): self.conf = conf @abc.abstractmethod def get_sample_data(self, meter_name, parse_url, params, cache): """Return volume, resource_id, resource_metadata, timestamp in tuple. If not implemented for meter_name, returns None """ ceilometer-10.0.0/ceilometer/network/statistics/table.py0000666000175100017510000000242713236733243023435 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network import statistics from ceilometer import sample class TablePollster(statistics._Base): meter_name = 'switch.table' meter_type = sample.TYPE_GAUGE meter_unit = 'table' class TablePollsterActiveEntries(statistics._Base): meter_name = 'switch.table.active.entries' meter_type = sample.TYPE_GAUGE meter_unit = 'entry' class TablePollsterLookupPackets(statistics._Base): meter_name = 'switch.table.lookup.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' class TablePollsterMatchedPackets(statistics._Base): meter_name = 'switch.table.matched.packets' meter_type = sample.TYPE_GAUGE meter_unit = 'packet' ceilometer-10.0.0/ceilometer/network/statistics/port_v2.py0000666000175100017510000000355113236733243023740 0ustar zuulzuul00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
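# The pollsters below follow the same pattern as the switch.port.* ones in
# port.py but emit unprefixed 'port.*' meter names; driver lookup and sample
# construction are shared through statistics._Base, e.g.
#
#   PortPollsterReceiveBytes -> Sample(name='port.receive.bytes', unit='B',
#                                      type=sample.TYPE_CUMULATIVE, ...)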
from ceilometer.network import statistics from ceilometer import sample class PortPollster(statistics._Base): meter_name = 'port' meter_type = sample.TYPE_GAUGE meter_unit = 'port' class PortPollsterUptime(statistics._Base): meter_name = 'port.uptime' meter_type = sample.TYPE_GAUGE meter_unit = 's' class PortPollsterReceivePackets(statistics._Base): meter_name = 'port.receive.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterTransmitPackets(statistics._Base): meter_name = 'port.transmit.packets' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveBytes(statistics._Base): meter_name = 'port.receive.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterTransmitBytes(statistics._Base): meter_name = 'port.transmit.bytes' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'B' class PortPollsterReceiveDrops(statistics._Base): meter_name = 'port.receive.drops' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' class PortPollsterReceiveErrors(statistics._Base): meter_name = 'port.receive.errors' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'packet' ceilometer-10.0.0/ceilometer/telemetry/0000775000175100017510000000000013236733440020113 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/telemetry/notifications.py0000666000175100017510000000301213236733243023335 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.pipeline import sample as endpoint from ceilometer import sample class TelemetryIpc(endpoint.SampleEndpoint): """Handle sample from notification bus Telemetry samples polled by polling agent. """ event_types = ['telemetry.polling'] def build_sample(self, message): samples = message['payload']['samples'] for sample_dict in samples: yield sample.Sample( name=sample_dict['counter_name'], type=sample_dict['counter_type'], unit=sample_dict['counter_unit'], volume=sample_dict['counter_volume'], user_id=sample_dict['user_id'], project_id=sample_dict['project_id'], resource_id=sample_dict['resource_id'], timestamp=sample_dict['timestamp'], resource_metadata=sample_dict['resource_metadata'], source=sample_dict['source'], id=sample_dict['message_id']) ceilometer-10.0.0/ceilometer/telemetry/__init__.py0000666000175100017510000000000013236733243022215 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/i18n.py0000666000175100017510000000204713236733243017240 0ustar zuulzuul00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
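# Typical usage elsewhere in this tree (this call is taken from
# network/services/fwaas.py):
#
#   from ceilometer.i18n import _
#   LOG.warning(_("Unknown status %(stat)s received on fw %(id)s,"
#                 "skipping sample") % {'stat': fw['status'], 'id': fw['id']})
#
# _() marks the message for translation in the 'ceilometer' domain defined
# below.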
"""oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/usage.html """ import oslo_i18n DOMAIN = 'ceilometer' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ceilometer-10.0.0/ceilometer/volume/0000775000175100017510000000000013236733440017410 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/volume/cinder.py0000666000175100017510000000651313236733243021236 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with volumes """ from __future__ import absolute_import from ceilometer.polling import plugin_base from ceilometer import sample class _Base(plugin_base.PollsterBase): def extract_metadata(self, obj): return dict((k, getattr(obj, k)) for k in self.FIELDS) class VolumeSizePollster(_Base): @property def default_discovery(self): return 'volumes' FIELDS = ['name', 'status', 'volume_type', 'os-vol-host-attr:host', 'migration_status', 'attachments', 'snapshot_id', 'source_volid'] def get_samples(self, manager, cache, resources): for volume in resources: yield sample.Sample( name='volume.size', type=sample.TYPE_GAUGE, unit='GB', volume=volume.size, user_id=volume.user_id, project_id=getattr(volume, 'os-vol-tenant-attr:tenant_id'), resource_id=volume.id, resource_metadata=self.extract_metadata(volume), ) class VolumeSnapshotSize(_Base): @property def default_discovery(self): return 'volume_snapshots' FIELDS = ['name', 'volume_id', 'status', 'description', 'metadata', 'os-extended-snapshot-attributes:progress', ] def get_samples(self, manager, cache, resources): for snapshot in resources: yield sample.Sample( name='volume.snapshot.size', type=sample.TYPE_GAUGE, unit='GB', volume=snapshot.size, user_id=snapshot.user_id, project_id=getattr( snapshot, 'os-extended-snapshot-attributes:project_id'), resource_id=snapshot.id, resource_metadata=self.extract_metadata(snapshot), ) class VolumeBackupSize(_Base): @property def default_discovery(self): return 'volume_backups' FIELDS = ['name', 'object_count', 'container', 'volume_id', 'status', 'description'] def get_samples(self, manager, cache, resources): for backup in resources: yield sample.Sample( name='volume.backup.size', type=sample.TYPE_GAUGE, unit='GB', volume=backup.size, user_id=None, project_id=getattr( backup, 'os-backup-project-attr:project_id', None), resource_id=backup.id, resource_metadata=self.extract_metadata(backup), ) ceilometer-10.0.0/ceilometer/volume/discovery.py0000666000175100017510000000367713236733243022011 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinderclient import client as cinder_client from oslo_config import cfg from ceilometer import keystone_client from ceilometer.polling import plugin_base SERVICE_OPTS = [ cfg.StrOpt('cinder', deprecated_name='cinderv2', default='volumev3', help='Cinder service type.'), ] class _BaseDiscovery(plugin_base.DiscoveryBase): def __init__(self, conf): super(_BaseDiscovery, self).__init__(conf) creds = conf.service_credentials self.client = cinder_client.Client( version='3', session=keystone_client.get_session(conf), region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.cinder ) class VolumeDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover volume resources to monitor.""" return self.client.volumes.list(search_opts={'all_tenants': True}) class VolumeSnapshotsDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover snapshot resources to monitor.""" return self.client.volume_snapshots.list( search_opts={'all_tenants': True}) class VolumeBackupsDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover volume resources to monitor.""" return self.client.backups.list(search_opts={'all_tenants': True}) ceilometer-10.0.0/ceilometer/volume/__init__.py0000666000175100017510000000000013236733243021512 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/keystone_client.py0000666000175100017510000000723113236733243021660 0ustar zuulzuul00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
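# --- Illustrative sketch (not part of the original tree) ---------------------
# How the cinder discovery classes and the volume size pollster defined above
# cooperate during one polling cycle.  `conf` is assumed to be a fully loaded
# ceilometer configuration and `manager` a running polling AgentManager; in a
# real deployment both are created by the polling service, not by hand.
from ceilometer.volume import cinder as volume_pollsters
from ceilometer.volume import discovery as volume_discovery


def poll_volume_sizes(conf, manager):
    # VolumeDiscovery lists every tenant's volumes through cinderclient.
    volumes = volume_discovery.VolumeDiscovery(conf).discover(manager)
    # VolumeSizePollster emits one 'volume.size' gauge sample per volume.
    pollster = volume_pollsters.VolumeSizePollster(conf)
    return list(pollster.get_samples(manager, cache={}, resources=volumes))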
import os from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg DEFAULT_GROUP = "service_credentials" # List of group that can set auth_section to use a different # credentials section OVERRIDABLE_GROUPS = ['gnocchi', 'zaqar'] def get_session(conf, requests_session=None, group=None, timeout=None): """Get a ceilometer service credentials auth session.""" group = group or DEFAULT_GROUP auth_plugin = ka_loading.load_auth_from_conf_options(conf, group) kwargs = {'auth': auth_plugin, 'session': requests_session} if timeout is not None: kwargs['timeout'] = timeout session = ka_loading.load_session_from_conf_options(conf, group, **kwargs) return session def get_client(conf, trust_id=None, requests_session=None, group=DEFAULT_GROUP): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = get_session(conf, requests_session=requests_session, group=group) return ks_client_v3.Client(session=session, trust_id=trust_id, region_name=conf[group].region_name) def get_service_catalog(client): return client.session.auth.get_access(client.session).service_catalog def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token CLI_OPTS = [ cfg.StrOpt('region-name', deprecated_group="DEFAULT", deprecated_name="os-region-name", default=os.environ.get('OS_REGION_NAME'), help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] def register_keystoneauth_opts(conf): _register_keystoneauth_group(conf, DEFAULT_GROUP) for group in OVERRIDABLE_GROUPS: _register_keystoneauth_group(conf, group) conf.set_default('auth_section', DEFAULT_GROUP, group=group) def _register_keystoneauth_group(conf, group): ka_loading.register_auth_conf_options(conf, group) ka_loading.register_session_conf_options( conf, group, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=group), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) conf.register_opts(CLI_OPTS, group=group) def post_register_keystoneauth_opts(conf): for group in OVERRIDABLE_GROUPS: if conf[group].auth_section != DEFAULT_GROUP: # NOTE(sileht): We register this again after the auth_section have # been read from the configuration file _register_keystoneauth_group(conf, conf[group].auth_section) ceilometer-10.0.0/ceilometer/messaging.py0000666000175100017510000000645513236733243020445 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
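# --- Illustrative sketch (not part of the original tree) ---------------------
# Typical use of the keystone_client helpers above: build a client from the
# [service_credentials] options and query the service catalog.  `conf` is
# assumed to be an oslo.config object on which register_keystoneauth_opts()
# has already been called and which has been populated from ceilometer.conf;
# the 'volumev3' service type is only an example.
from ceilometer import keystone_client


def example_catalog_lookup(conf):
    client = keystone_client.get_client(conf)          # keystone v3 client
    catalog = keystone_client.get_service_catalog(client)
    cinder_urls = catalog.get_urls(service_type='volumev3',
                                   interface='public')
    return client.projects.list(), cinder_urls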
from oslo_config import cfg import oslo_messaging from oslo_messaging._drivers import impl_rabbit from oslo_messaging.notify import notifier from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} def setup(): oslo_messaging.set_transport_defaults('ceilometer') # NOTE(sileht): When batch is not enabled, oslo.messaging read all messages # in the queue and can consume a lot of memory, that works for rpc because # you never have a lot of message, but sucks for notification. The # default is not changeable on oslo.messaging side. And we can't expose # this option to set set_transport_defaults because it a driver option. # 100 allow to prefetch a lot of messages but limit memory to 1G per # workers in worst case (~ 1M Nova notification) # And even driver options are located in private module, this is not going # to break soon. cfg.set_defaults( impl_rabbit.rabbit_opts, rabbit_qos_prefetch_count=100, ) def get_transport(conf, url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = notifier.get_notification_transport(conf, url) except (oslo_messaging.InvalidTransportURL, oslo_messaging.DriverLoadFailure): if not optional or url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def cleanup(): """Cleanup the oslo_messaging layer.""" global TRANSPORTS, NOTIFIERS NOTIFIERS = {} for url in TRANSPORTS: TRANSPORTS[url].cleanup() del TRANSPORTS[url] _SERIALIZER = oslo_serializer.JsonPayloadSerializer() def get_batch_notification_listener(transport, targets, endpoints, allow_requeue=False, batch_size=1, batch_timeout=None): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_batch_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue, batch_size=batch_size, batch_timeout=batch_timeout) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) ceilometer-10.0.0/ceilometer/polling/0000775000175100017510000000000013236733440017545 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/polling/manager.py0000666000175100017510000005476113236733243021551 0ustar zuulzuul00000000000000# # Copyright 2013 Julien Danjou # Copyright 2014-2017 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
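# --- Illustrative sketch (not part of the original tree) ---------------------
# How the messaging helpers above are combined to emit a notification,
# mirroring what the polling agent does for 'telemetry.polling'.  `conf` is
# assumed to be a fully registered ceilometer configuration and sample_dicts
# a list of dictionaries built by publisher.utils.meter_message_from_counter().
from ceilometer import messaging


def send_polled_samples(conf, sample_dicts):
    messaging.setup()                          # set ceilometer transport defaults
    transport = messaging.get_transport(conf)  # cached notification transport
    notifier = messaging.get_notifier(transport,
                                      publisher_id='ceilometer.polling')
    # Same event type and payload shape the notification agent listens for.
    notifier.sample({}, 'telemetry.polling', {'samples': sample_dicts})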
import collections import itertools import logging import random import uuid from concurrent import futures import cotyledon from futurist import periodics from keystoneauth1 import exceptions as ka_exceptions from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import timeutils import six from six.moves.urllib import parse as urlparse from stevedore import extension from tooz import coordination from ceilometer import agent from ceilometer import keystone_client from ceilometer import messaging from ceilometer.polling import plugin_base from ceilometer.publisher import utils as publisher_utils from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.BoolOpt('batch_polled_samples', default=True, help='To reduce polling agent load, samples are sent to the ' 'notification agent in a batch. To gain higher ' 'throughput at the cost of load set this to False.'), ] POLLING_OPTS = [ cfg.StrOpt('cfg_file', default="polling.yaml", help="Configuration file for polling definition." ), cfg.StrOpt('partitioning_group_prefix', deprecated_group='central', help='Work-load partitioning group prefix. Use only if you ' 'want to run multiple polling agents with different ' 'config files. For each sub-group of the agent ' 'pool with the same partitioning_group_prefix a disjoint ' 'subset of pollsters should be loaded.'), ] def hash_of_set(s): return str(hash(frozenset(s))) class EmptyPollstersList(Exception): def __init__(self): msg = ('No valid pollsters can be loaded with the startup parameter' ' polling-namespaces.') super(EmptyPollstersList, self).__init__(msg) class PollingException(agent.ConfigException): def __init__(self, message, cfg): super(PollingException, self).__init__('Polling', message, cfg) class Resources(object): def __init__(self, agent_manager): self.agent_manager = agent_manager self._resources = [] self._discovery = [] self.blacklist = [] def setup(self, source): self._resources = source.resources self._discovery = source.discovery def get(self, discovery_cache=None): source_discovery = (self.agent_manager.discover(self._discovery, discovery_cache) if self._discovery else []) if self._resources: static_resources_group = self.agent_manager.construct_group_id( hash_of_set(self._resources)) return [v for v in self._resources if not self.agent_manager.partition_coordinator or self.agent_manager.hashrings[ static_resources_group].belongs_to_self( six.text_type(v))] + source_discovery return source_discovery @staticmethod def key(source_name, pollster): return '%s-%s' % (source_name, pollster.name) def iter_random(iterable): """Iter over iterable in a random fashion.""" lst = list(iterable) random.shuffle(lst) return iter(lst) class PollingTask(object): """Polling task for polling samples and notifying. A polling task can be invoked periodically or only once. 
""" def __init__(self, agent_manager): self.manager = agent_manager # elements of the Cartesian product of sources X pollsters # with a common interval self.pollster_matches = collections.defaultdict(set) # we relate the static resources and per-source discovery to # each combination of pollster and matching source resource_factory = lambda: Resources(agent_manager) self.resources = collections.defaultdict(resource_factory) self._batch = self.manager.conf.batch_polled_samples self._telemetry_secret = self.manager.conf.publisher.telemetry_secret def add(self, pollster, source): self.pollster_matches[source.name].add(pollster) key = Resources.key(source.name, pollster) self.resources[key].setup(source) def poll_and_notify(self): """Polling sample and notify.""" cache = {} discovery_cache = {} poll_history = {} for source_name, pollsters in iter_random( self.pollster_matches.items()): for pollster in iter_random(pollsters): key = Resources.key(source_name, pollster) candidate_res = list( self.resources[key].get(discovery_cache)) if not candidate_res and pollster.obj.default_discovery: candidate_res = self.manager.discover( [pollster.obj.default_discovery], discovery_cache) # Remove duplicated resources and black resources. Using # set() requires well defined __hash__ for each resource. # Since __eq__ is defined, 'not in' is safe here. polling_resources = [] black_res = self.resources[key].blacklist history = poll_history.get(pollster.name, []) for x in candidate_res: if x not in history: history.append(x) if x not in black_res: polling_resources.append(x) poll_history[pollster.name] = history # If no resources, skip for this pollster if not polling_resources: p_context = 'new ' if history else '' LOG.debug("Skip pollster %(name)s, no %(p_context)s" "resources found this cycle", {'name': pollster.name, 'p_context': p_context}) continue LOG.info("Polling pollster %(poll)s in the context of " "%(src)s", dict(poll=pollster.name, src=source_name)) try: polling_timestamp = timeutils.utcnow().isoformat() samples = pollster.obj.get_samples( manager=self.manager, cache=cache, resources=polling_resources ) sample_batch = [] for sample in samples: # Note(yuywz): Unify the timestamp of polled samples sample.set_timestamp(polling_timestamp) sample_dict = ( publisher_utils.meter_message_from_counter( sample, self._telemetry_secret )) if self._batch: sample_batch.append(sample_dict) else: self._send_notification([sample_dict]) if sample_batch: self._send_notification(sample_batch) except plugin_base.PollsterPermanentError as err: LOG.error( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!', dict(name=pollster.name, res_list=str(err.fail_res_list), source=source_name)) self.resources[key].blacklist.extend(err.fail_res_list) except Exception as err: LOG.error( 'Continue after error from %(name)s: %(error)s' % ({'name': pollster.name, 'error': err}), exc_info=True) def _send_notification(self, samples): self.manager.notifier.sample( {}, 'telemetry.polling', {'samples': samples} ) class AgentManager(cotyledon.Service): def __init__(self, worker_id, conf, namespaces=None): namespaces = namespaces or ['compute', 'central'] group_prefix = conf.polling.partitioning_group_prefix super(AgentManager, self).__init__(worker_id) self.conf = conf if type(namespaces) is not list: namespaces = [namespaces] # we'll have default ['compute', 'central'] here if no namespaces will # be passed extensions = (self._extensions('poll', namespace, self.conf).extensions for namespace in namespaces) 
# get the extensions from pollster builder extensions_fb = (self._extensions_from_builder('poll', namespace) for namespace in namespaces) self.extensions = list(itertools.chain(*list(extensions))) + list( itertools.chain(*list(extensions_fb))) if self.extensions == []: raise EmptyPollstersList() discoveries = (self._extensions('discover', namespace, self.conf).extensions for namespace in namespaces) self.discoveries = list(itertools.chain(*list(discoveries))) self.polling_periodics = None self.hashrings = None self.partition_coordinator = None if self.conf.coordination.backend_url: # XXX uuid4().bytes ought to work, but it requires ascii for now coordination_id = str(uuid.uuid4()).encode('ascii') self.partition_coordinator = coordination.get_coordinator( self.conf.coordination.backend_url, coordination_id) # Compose coordination group prefix. # We'll use namespaces as the basement for this partitioning. namespace_prefix = '-'.join(sorted(namespaces)) self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix) if group_prefix else namespace_prefix) self.notifier = oslo_messaging.Notifier( messaging.get_transport(self.conf), driver=self.conf.publisher_notifier.telemetry_driver, publisher_id="ceilometer.polling") self._keystone = None self._keystone_last_exception = None @staticmethod def _get_ext_mgr(namespace, *args, **kwargs): def _catch_extension_load_error(mgr, ep, exc): # Extension raising ExtensionLoadError can be ignored, # and ignore anything we can't import as a safety measure. if isinstance(exc, plugin_base.ExtensionLoadError): LOG.exception("Skip loading extension for %s", ep.name) return show_exception = (LOG.isEnabledFor(logging.DEBUG) and isinstance(exc, ImportError)) LOG.error("Failed to import extension for %(name)r: " "%(error)s", {'name': ep.name, 'error': exc}, exc_info=show_exception) if isinstance(exc, ImportError): return raise exc return extension.ExtensionManager( namespace=namespace, invoke_on_load=True, invoke_args=args, invoke_kwds=kwargs, on_load_failure_callback=_catch_extension_load_error, ) def _extensions(self, category, agent_ns=None, *args, **kwargs): namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.%s' % category) return self._get_ext_mgr(namespace, *args, **kwargs) def _extensions_from_builder(self, category, agent_ns=None): ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns else 'ceilometer.builder.%s' % category) mgr = self._get_ext_mgr(ns, self.conf) def _build(ext): return ext.plugin.get_pollsters_extensions(self.conf) # NOTE: this seems a stevedore bug. if no extensions are found, # map will raise runtimeError which is not documented. 
if mgr.names(): return list(itertools.chain(*mgr.map(_build))) else: return [] def join_partitioning_groups(self): groups = set([self.construct_group_id(d.obj.group_id) for d in self.discoveries]) # let each set of statically-defined resources have its own group static_resource_groups = set([ self.construct_group_id(hash_of_set(p.resources)) for p in self.polling_manager.sources if p.resources ]) groups.update(static_resource_groups) self.hashrings = dict( (group, self.partition_coordinator.join_partitioned_group(group)) for group in groups) def setup_polling_tasks(self): polling_tasks = {} for source in self.polling_manager.sources: for pollster in self.extensions: if source.support_meter(pollster.name): polling_task = polling_tasks.get(source.get_interval()) if not polling_task: polling_task = PollingTask(self) polling_tasks[source.get_interval()] = polling_task polling_task.add(pollster, source) return polling_tasks def construct_group_id(self, discovery_group_id): return '%s-%s' % (self.group_prefix, discovery_group_id) def start_polling_tasks(self): data = self.setup_polling_tasks() # Don't start useless threads if no task will run if not data: return # One thread per polling tasks is enough self.polling_periodics = periodics.PeriodicWorker.create( [], executor_factory=lambda: futures.ThreadPoolExecutor(max_workers=len(data))) for interval, polling_task in data.items(): @periodics.periodic(spacing=interval, run_immediately=True) def task(running_task): self.interval_task(running_task) self.polling_periodics.add(task, polling_task) utils.spawn_thread(self.polling_periodics.start, allow_empty=True) def run(self): super(AgentManager, self).run() self.polling_manager = PollingManager(self.conf) if self.partition_coordinator: self.partition_coordinator.start() self.join_partitioning_groups() self.start_polling_tasks() def terminate(self): self.stop_pollsters_tasks() if self.partition_coordinator: self.partition_coordinator.stop() super(AgentManager, self).terminate() def interval_task(self, task): # NOTE(sileht): remove the previous keystone client # and exception to get a new one in this polling cycle. self._keystone = None self._keystone_last_exception = None task.poll_and_notify() @property def keystone(self): # FIXME(sileht): This lazy loading of keystone client doesn't # look concurrently safe, we never see issue because once we have # connected to keystone everything is fine, and because all pollsters # are delayed during startup. But each polling task creates a new # client and overrides it which has been created by other polling # tasks. During this short time bad thing can occur. # # I think we must not reset keystone client before # running a polling task, but refresh it periodically instead. 
# NOTE(sileht): we do lazy loading of the keystone client # for multiple reasons: # * don't use it if no plugin need it # * use only one client for all plugins per polling cycle if self._keystone is None and self._keystone_last_exception is None: try: self._keystone = keystone_client.get_client(self.conf) self._keystone_last_exception = None except ka_exceptions.ClientException as e: self._keystone = None self._keystone_last_exception = e if self._keystone is not None: return self._keystone else: raise self._keystone_last_exception @staticmethod def _parse_discoverer(url): s = urlparse.urlparse(url) return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) def _discoverer(self, name): for d in self.discoveries: if d.name == name: return d.obj return None def discover(self, discovery=None, discovery_cache=None): resources = [] discovery = discovery or [] for url in discovery: if discovery_cache is not None and url in discovery_cache: resources.extend(discovery_cache[url]) continue name, param = self._parse_discoverer(url) discoverer = self._discoverer(name) if discoverer: try: if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: service_type = getattr( self.conf.service_types, discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) if not keystone_client.get_service_catalog( self.keystone).get_endpoints( service_type=service_type): LOG.warning( 'Skipping %(name)s, %(service_type)s service ' 'is not registered in keystone', {'name': name, 'service_type': service_type}) continue discovered = discoverer.discover(self, param) if self.partition_coordinator: discovered = [ v for v in discovered if self.hashrings[ self.construct_group_id(discoverer.group_id) ].belongs_to_self(six.text_type(v))] resources.extend(discovered) if discovery_cache is not None: discovery_cache[url] = discovered except ka_exceptions.ClientException as e: LOG.error('Skipping %(name)s, keystone issue: ' '%(exc)s', {'name': name, 'exc': e}) except Exception as err: LOG.exception('Unable to discover resources: %s', err) else: LOG.warning('Unknown discovery extension: %s', name) return resources def stop_pollsters_tasks(self): if self.polling_periodics: self.polling_periodics.stop() self.polling_periodics.wait() self.polling_periodics = None class PollingManager(agent.ConfigManagerBase): """Polling Manager to handle polling definition""" def __init__(self, conf): """Setup the polling according to config. The configuration is supported as follows: {"sources": [{"name": source_1, "interval": interval_time, "meters" : ["meter_1", "meter_2"], "resources": ["resource_uri1", "resource_uri2"], }, {"name": source_2, "interval": interval_time, "meters" : ["meter_3"], }, ]} } The interval determines the cadence of sample polling Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. The resources is list of URI indicating the resources from where the meters should be polled. It's optional and it's up to the specific pollster to decide how to use it. 
""" super(PollingManager, self).__init__(conf) cfg = self.load_config(conf.polling.cfg_file) self.sources = [] if 'sources' not in cfg: raise PollingException("sources required", cfg) for s in cfg.get('sources'): self.sources.append(PollingSource(s)) class PollingSource(agent.Source): """Represents a source of pollsters In effect it is a set of pollsters emitting samples for a set of matching meters. Each source encapsulates meter name matching, polling interval determination, optional resource enumeration or discovery. """ def __init__(self, cfg): try: super(PollingSource, self).__init__(cfg) except agent.SourceException as err: raise PollingException(err.msg, cfg) try: self.meters = cfg['meters'] except KeyError: raise PollingException("Missing meters value", cfg) try: self.interval = int(cfg['interval']) except ValueError: raise PollingException("Invalid interval value", cfg) except KeyError: raise PollingException("Missing interval value", cfg) if self.interval <= 0: raise PollingException("Interval value should > 0", cfg) self.resources = cfg.get('resources') or [] if not isinstance(self.resources, list): raise PollingException("Resources should be a list", cfg) self.discovery = cfg.get('discovery') or [] if not isinstance(self.discovery, list): raise PollingException("Discovery should be a list", cfg) try: self.check_source_filtering(self.meters, 'meters') except agent.SourceException as err: raise PollingException(err.msg, cfg) def get_interval(self): return self.interval def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) ceilometer-10.0.0/ceilometer/polling/__init__.py0000666000175100017510000000000013236733243021647 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/polling/discovery/0000775000175100017510000000000013236733440021554 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/polling/discovery/localnode.py0000666000175100017510000000142513236733243024073 0ustar zuulzuul00000000000000# Copyright 2015 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.polling import plugin_base class LocalNodeDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): """Return local node as resource.""" return ['local_host'] ceilometer-10.0.0/ceilometer/polling/discovery/tenant.py0000666000175100017510000000270113236733243023422 0ustar zuulzuul00000000000000# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from ceilometer.polling import plugin_base as plugin LOG = log.getLogger(__name__) class TenantDiscovery(plugin.DiscoveryBase): """Discovery that supplies keystone tenants. This discovery should be used when the pollster's work can't be divided into smaller pieces than per-tenants. Example of this is the Swift pollster, which polls account details and does so per-project. """ def discover(self, manager, param=None): domains = manager.keystone.domains.list() LOG.debug('Found %s keystone domains', len(domains)) tenants = [] for domain in domains: domain_tenants = manager.keystone.projects.list(domain) LOG.debug("Found %s tenants in domain %s", len(domain_tenants), domain.name) tenants = tenants + domain_tenants return tenants or [] ceilometer-10.0.0/ceilometer/polling/discovery/__init__.py0000666000175100017510000000000013236733243023656 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/polling/discovery/endpoint.py0000666000175100017510000000277013236733243023757 0ustar zuulzuul00000000000000# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer import keystone_client from ceilometer.polling import plugin_base as plugin LOG = log.getLogger(__name__) class EndpointDiscovery(plugin.DiscoveryBase): """Discovery that supplies service endpoints. This discovery should be used when the relevant APIs are not well suited to dividing the pollster's work into smaller pieces than a whole service at once. """ def discover(self, manager, param=None): endpoints = keystone_client.get_service_catalog( manager.keystone).get_urls( service_type=param, interface=self.conf.service_credentials.interface, region_name=self.conf.service_credentials.region_name) if not endpoints: LOG.warning('No endpoints found for service %s', "" if param is None else param) return [] return endpoints ceilometer-10.0.0/ceilometer/polling/plugin_base.py0000666000175100017510000001401413236733243022412 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for plugins. """ import abc import six from stevedore import extension class PluginBase(object): """Base class for all plugins.""" class ExtensionLoadError(Exception): """Error of loading pollster plugin. PollsterBase provides a hook, setup_environment, called in pollster loading to setup required HW/SW dependency. Any exception from it would be propagated as ExtensionLoadError, then skip loading this pollster. 
""" pass class PollsterPermanentError(Exception): """Permanent error when polling. When unrecoverable error happened in polling, pollster can raise this exception with failed resource to prevent itself from polling any more. Resource is one of parameter resources from get_samples that cause polling error. """ def __init__(self, resources): self.fail_res_list = resources @six.add_metaclass(abc.ABCMeta) class PollsterBase(PluginBase): """Base class for plugins that support the polling API.""" def setup_environment(self): """Setup required environment for pollster. Each subclass could overwrite it for specific usage. Any exception raised in this function would prevent pollster being loaded. """ pass def __init__(self, conf): super(PollsterBase, self).__init__() self.conf = conf try: self.setup_environment() except Exception as err: raise ExtensionLoadError(err) @abc.abstractproperty def default_discovery(self): """Default discovery to use for this pollster. There are three ways a pollster can get a list of resources to poll, listed here in ascending order of precedence: 1. from the per-agent discovery, 2. from the per-pollster discovery (defined here) 3. from the per-pipeline configured discovery and/or per-pipeline configured static resources. If a pollster should only get resources from #1 or #3, this property should be set to None. """ @abc.abstractmethod def get_samples(self, manager, cache, resources): """Return a sequence of Counter instances from polling the resources. :param manager: The service manager class invoking the plugin. :param cache: A dictionary to allow pollsters to pass data between themselves when recomputing it would be expensive (e.g., asking another service for a list of objects). :param resources: A list of resources the pollster will get data from. It's up to the specific pollster to decide how to use it. It is usually supplied by a discovery, see ``default_discovery`` for more information. """ @classmethod def build_pollsters(cls, conf): """Return a list of tuple (name, pollster). The name is the meter name which the pollster would return, the pollster is a pollster object instance. The pollster which implements this method should be registered in the namespace of ceilometer.builder.xxx instead of ceilometer.poll.xxx. """ return [] @classmethod def get_pollsters_extensions(cls, conf): """Return a list of stevedore extensions. The returned stevedore extensions wrap the pollster object instances returned by build_pollsters. """ extensions = [] try: for name, pollster in cls.build_pollsters(conf): ext = extension.Extension(name, None, cls, pollster) extensions.append(ext) except Exception as err: raise ExtensionLoadError(err) return extensions @six.add_metaclass(abc.ABCMeta) class DiscoveryBase(object): KEYSTONE_REQUIRED_FOR_SERVICE = None """Service type required in keystone catalog to works""" def __init__(self, conf): self.conf = conf @abc.abstractmethod def discover(self, manager, param=None): """Discover resources to monitor. The most fine-grained discovery should be preferred, so the work is the most evenly distributed among multiple agents (if they exist). For example: if the pollster can separately poll individual resources, it should have its own discovery implementation to discover those resources. If it can only poll per-tenant, then the `TenantDiscovery` should be used. If even that is not possible, use `EndpointDiscovery` (see their respective docstrings). :param manager: The service manager class invoking the plugin. 
:param param: an optional parameter to guide the discovery """ @property def group_id(self): """Return group id of this discovery. All running discoveries with the same group_id should return the same set of resources at a given point in time. By default, a discovery is put into a global group, meaning that all discoveries of its type running anywhere in the cloud, return the same set of resources. This property can be overridden to provide correct grouping of localized discoveries. For example, compute discovery is localized to a host, which is reflected in its group_id. A None value signifies that this discovery does not want to be part of workload partitioning at all. """ return 'global' ceilometer-10.0.0/ceilometer/publisher/0000775000175100017510000000000013236733440020076 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/publisher/http.py0000666000175100017510000001635513236733243021444 0ustar zuulzuul00000000000000# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log from oslo_utils import strutils import requests from requests import adapters from six.moves.urllib import parse as urlparse from ceilometer import publisher LOG = log.getLogger(__name__) class HttpPublisher(publisher.ConfigPublisherBase): """Publish metering data to a http endpoint This publisher pushes metering data to a specified http endpoint. The endpoint should be configured in ceilometer pipeline configuration file. If the `timeout` and/or `max_retries` are not specified, the default `timeout` and `max_retries` will be set to 5 and 2 respectively. Additional parameters are: - ssl certificate verification can be disabled by setting `verify_ssl` to False - batching can be configured by `batch` - Basic authentication can be configured using the URL authentication scheme: http://username:password@example.com - For certificate authentication, `clientcert` and `clientkey` are the paths to the certificate and key files respectively. `clientkey` is only required if the clientcert file doesn't already contain the key. All of the parameters mentioned above get removed during processing, with the remaining portion of the URL being used as the actual endpoint. e.g. 
https://username:password@example.com/path?verify_ssl=False&q=foo will result in a call to https://example.com/path?q=foo To use this publisher for samples, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - http://host:80/path?timeout=1&max_retries=2&batch=False In the event_pipeline.yaml file, you can use the publisher in one of the sinks like the following: - name: event_sink transformers: publishers: - http://host:80/path?timeout=1&max_retries=2 """ def __init__(self, conf, parsed_url): super(HttpPublisher, self).__init__(conf, parsed_url) if not parsed_url.hostname: raise ValueError('The hostname of an endpoint for ' 'HttpPublisher is required') # non-numeric port from the url string will cause a ValueError # exception when the port is read. Do a read to make sure the port # is valid, if not, ValueError will be thrown. parsed_url.port self.headers = {'Content-type': 'application/json'} # Handling other configuration options in the query string params = urlparse.parse_qs(parsed_url.query) self.timeout = self._get_param(params, 'timeout', 5, int) self.max_retries = self._get_param(params, 'max_retries', 2, int) self.poster = ( self._do_post if strutils.bool_from_string(self._get_param( params, 'batch', True)) else self._individual_post) verify_ssl = self._get_param(params, 'verify_ssl', True) try: self.verify_ssl = strutils.bool_from_string(verify_ssl, strict=True) except ValueError: self.verify_ssl = (verify_ssl or True) username = parsed_url.username password = parsed_url.password if username: self.client_auth = (username, password) netloc = parsed_url.netloc.replace(username+':'+password+'@', '') else: self.client_auth = None netloc = parsed_url.netloc clientcert = self._get_param(params, 'clientcert', None) clientkey = self._get_param(params, 'clientkey', None) if clientcert: if clientkey: self.client_cert = (clientcert, clientkey) else: self.client_cert = clientcert else: self.client_cert = None self.raw_only = strutils.bool_from_string( self._get_param(params, 'raw_only', False)) kwargs = {'max_retries': self.max_retries, 'pool_connections': conf.max_parallel_requests, 'pool_maxsize': conf.max_parallel_requests} self.session = requests.Session() # authentication & config params have been removed, so use URL with # updated query string self.target = urlparse.urlunsplit([ parsed_url.scheme, netloc, parsed_url.path, urlparse.urlencode(params), parsed_url.fragment]) self.session.mount(self.target, adapters.HTTPAdapter(**kwargs)) LOG.debug('HttpPublisher for endpoint %s is initialized!' % self.target) @staticmethod def _get_param(params, name, default_value, cast=None): try: return cast(params.pop(name)[-1]) if cast else params.pop(name)[-1] except (ValueError, TypeError, KeyError): LOG.debug('Default value %(value)s is used for %(name)s' % {'value': default_value, 'name': name}) return default_value def _individual_post(self, data): for d in data: self._do_post(d) def _do_post(self, data): if not data: LOG.debug('Data set is empty!') return data = json.dumps(data) LOG.trace('Message: %s', data) try: res = self.session.post(self.target, data=data, headers=self.headers, timeout=self.timeout, auth=self.client_auth, cert=self.client_cert, verify=self.verify_ssl) res.raise_for_status() LOG.debug('Message posting to %s: status code %d.', self.target, res.status_code) except requests.exceptions.HTTPError: LOG.exception('Status Code: %(code)s. 
' 'Failed to dispatch message: %(data)s' % {'code': res.status_code, 'data': data}) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ self.poster([sample.as_dict() for sample in samples]) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ if self.raw_only: data = [evt.as_dict()['raw']['payload'] for evt in events if evt.as_dict().get('raw', {}).get('payload')] else: data = [event.serialize() for event in events] self.poster(data) ceilometer-10.0.0/ceilometer/publisher/data/0000775000175100017510000000000013236733440021007 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/publisher/data/gnocchi_resources.yaml0000666000175100017510000002303213236733243025402 0ustar zuulzuul00000000000000--- resources: - resource_type: identity metrics: - 'identity.authenticate.success' - 'identity.authenticate.pending' - 'identity.authenticate.failure' - 'identity.user.created' - 'identity.user.deleted' - 'identity.user.updated' - 'identity.group.created' - 'identity.group.deleted' - 'identity.group.updated' - 'identity.role.created' - 'identity.role.deleted' - 'identity.role.updated' - 'identity.project.created' - 'identity.project.deleted' - 'identity.project.updated' - 'identity.trust.created' - 'identity.trust.deleted' - 'identity.role_assignment.created' - 'identity.role_assignment.deleted' - resource_type: ceph_account metrics: - 'radosgw.objects' - 'radosgw.objects.size' - 'radosgw.objects.containers' - 'radosgw.api.request' - 'radosgw.containers.objects' - 'radosgw.containers.objects.size' - resource_type: instance metrics: - 'memory' - 'memory.usage' - 'memory.resident' - 'memory.swap.in' - 'memory.swap.out' - 'memory.bandwidth.total' - 'memory.bandwidth.local' - 'vcpus' - 'cpu' - 'cpu.delta' - 'cpu_util' - 'cpu_l3_cache' - 'disk.root.size' - 'disk.ephemeral.size' - 'disk.read.requests' - 'disk.read.requests.rate' - 'disk.write.requests' - 'disk.write.requests.rate' - 'disk.read.bytes' - 'disk.read.bytes.rate' - 'disk.write.bytes' - 'disk.write.bytes.rate' - 'disk.latency' - 'disk.iops' - 'disk.capacity' - 'disk.allocation' - 'disk.usage' - 'compute.instance.booting.time' - 'perf.cpu.cycles' - 'perf.instructions' - 'perf.cache.references' - 'perf.cache.misses' attributes: host: resource_metadata.(instance_host|host) image_ref: resource_metadata.image_ref display_name: resource_metadata.display_name flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) server_group: resource_metadata.user_metadata.server_group event_delete: compute.instance.delete.start event_attributes: id: instance_id event_associated_resources: instance_network_interface: '{"=": {"instance_id": "%s"}}' instance_disk: '{"=": {"instance_id": "%s"}}' - resource_type: instance_network_interface metrics: - 'network.outgoing.packets.rate' - 'network.incoming.packets.rate' - 'network.outgoing.packets' - 'network.incoming.packets' - 'network.outgoing.packets.drop' - 'network.incoming.packets.drop' - 'network.outgoing.packets.error' - 'network.incoming.packets.error' - 'network.outgoing.bytes.rate' - 'network.incoming.bytes.rate' - 'network.outgoing.bytes' - 'network.incoming.bytes' attributes: name: resource_metadata.vnic_name instance_id: resource_metadata.instance_id - resource_type: instance_disk metrics: - 'disk.device.read.requests' - 
'disk.device.read.requests.rate' - 'disk.device.write.requests' - 'disk.device.write.requests.rate' - 'disk.device.read.bytes' - 'disk.device.read.bytes.rate' - 'disk.device.write.bytes' - 'disk.device.write.bytes.rate' - 'disk.device.latency' - 'disk.device.iops' - 'disk.device.capacity' - 'disk.device.allocation' - 'disk.device.usage' attributes: name: resource_metadata.disk_name instance_id: resource_metadata.instance_id - resource_type: image metrics: - 'image.size' - 'image.download' - 'image.serve' attributes: name: resource_metadata.name container_format: resource_metadata.container_format disk_format: resource_metadata.disk_format event_delete: image.delete event_attributes: id: resource_id - resource_type: ipmi metrics: - 'hardware.ipmi.node.power' - 'hardware.ipmi.node.temperature' - 'hardware.ipmi.node.inlet_temperature' - 'hardware.ipmi.node.outlet_temperature' - 'hardware.ipmi.node.fan' - 'hardware.ipmi.node.current' - 'hardware.ipmi.node.voltage' - 'hardware.ipmi.node.airflow' - 'hardware.ipmi.node.cups' - 'hardware.ipmi.node.cpu_util' - 'hardware.ipmi.node.mem_util' - 'hardware.ipmi.node.io_util' - resource_type: network metrics: - 'bandwidth' - 'ip.floating' event_delete: floatingip.delete.end event_attributes: id: resource_id - resource_type: stack metrics: - 'stack.create' - 'stack.update' - 'stack.delete' - 'stack.resume' - 'stack.suspend' - resource_type: swift_account metrics: - 'storage.objects.incoming.bytes' - 'storage.objects.outgoing.bytes' - 'storage.api.request' - 'storage.objects.size' - 'storage.objects' - 'storage.objects.containers' - 'storage.containers.objects' - 'storage.containers.objects.size' - resource_type: volume metrics: - 'volume' - 'volume.size' - 'snapshot.size' - 'volume.snapshot.size' - 'volume.backup.size' attributes: display_name: resource_metadata.(display_name|name) volume_type: resource_metadata.volume_type event_delete: volume.delete.start event_attributes: id: resource_id - resource_type: volume_provider metrics: - 'volume.provider.capacity.total' - 'volume.provider.capacity.free' - 'volume.provider.capacity.allocated' - 'volume.provider.capacity.provisioned' - 'volume.provider.capacity.virtual_free' - resource_type: volume_provider_pool metrics: - 'volume.provider.pool.capacity.total' - 'volume.provider.pool.capacity.free' - 'volume.provider.pool.capacity.allocated' - 'volume.provider.pool.capacity.provisioned' - 'volume.provider.pool.capacity.virtual_free' attributes: provider: resource_metadata.provider - resource_type: host metrics: - 'hardware.cpu.load.1min' - 'hardware.cpu.load.5min' - 'hardware.cpu.load.15min' - 'hardware.cpu.util' - 'hardware.memory.total' - 'hardware.memory.used' - 'hardware.memory.swap.total' - 'hardware.memory.swap.avail' - 'hardware.memory.buffer' - 'hardware.memory.cached' - 'hardware.network.ip.outgoing.datagrams' - 'hardware.network.ip.incoming.datagrams' - 'hardware.system_stats.cpu.idle' - 'hardware.system_stats.io.outgoing.blocks' - 'hardware.system_stats.io.incoming.blocks' attributes: host_name: resource_metadata.resource_url - resource_type: host_disk metrics: - 'hardware.disk.size.total' - 'hardware.disk.size.used' - 'hardware.disk.read.bytes' - 'hardware.disk.write.bytes' - 'hardware.disk.read.requests' - 'hardware.disk.write.requests' attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.device - resource_type: host_network_interface metrics: - 'hardware.network.incoming.bytes' - 'hardware.network.outgoing.bytes' - 'hardware.network.outgoing.errors' attributes: 
host_name: resource_metadata.resource_url device_name: resource_metadata.name - resource_type: nova_compute metrics: - 'compute.node.cpu.frequency' - 'compute.node.cpu.idle.percent' - 'compute.node.cpu.idle.time' - 'compute.node.cpu.iowait.percent' - 'compute.node.cpu.iowait.time' - 'compute.node.cpu.kernel.percent' - 'compute.node.cpu.kernel.time' - 'compute.node.cpu.percent' - 'compute.node.cpu.user.percent' - 'compute.node.cpu.user.time' attributes: host_name: resource_metadata.host - resource_type: manila_share metrics: - 'manila.share.size' attributes: name: resource_metadata.name host: resource_metadata.host status: resource_metadata.status availability_zone: resource_metadata.availability_zone protocol: resource_metadata.protocol - resource_type: switch metrics: - 'switch' - 'switch.ports' attributes: controller: resource_metadata.controller - resource_type: switch_port metrics: - 'switch.port' - 'switch.port.uptime' - 'switch.port.receive.packets' - 'switch.port.transmit.packets' - 'switch.port.receive.bytes' - 'switch.port.transmit.bytes' - 'switch.port.receive.drops' - 'switch.port.transmit.drops' - 'switch.port.receive.errors' - 'switch.port.transmit.errors' - 'switch.port.receive.frame_error' - 'switch.port.receive.overrun_error' - 'switch.port.receive.crc_error' - 'switch.port.collision.count' attributes: switch: resource_metadata.switch port_number_on_switch: resource_metadata.port_number_on_switch neutron_port_id: resource_metadata.neutron_port_id controller: resource_metadata.controller - resource_type: port metrics: - 'port' - 'port.uptime' - 'port.receive.packets' - 'port.transmit.packets' - 'port.receive.bytes' - 'port.transmit.bytes' - 'port.receive.drops' - 'port.receive.errors' attributes: controller: resource_metadata.controller - resource_type: switch_table metrics: - 'switch.table.active.entries' attributes: controller: resource_metadata.controller switch: resource_metadata.switch ceilometer-10.0.0/ceilometer/publisher/zaqar.py0000666000175100017510000000536313236733243021600 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves.urllib import parse as urlparse from ceilometer import keystone_client from ceilometer import publisher from zaqarclient.queues.v2 import client as zaqarclient DEFAULT_TTL = 3600 class ZaqarPublisher(publisher.ConfigPublisherBase): """Publish metering data to a Zaqar queue. The target queue name must be configured in the ceilometer pipeline configuration file. The TTL can also optionally be specified as a query argument:: meter: - name: meter_zaqar meters: - "*" sinks: - zaqar_sink sinks: - name: zaqar_sink transformers: publishers: - zaqar://?queue=meter_queue&ttl=1200 The credentials to access Zaqar must be set in the [zaqar] section in the configuration. 
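    A minimal usage sketch (the publisher URL, queue name and TTL below are
    example values, and ``conf`` / ``samples`` are assumed to be a loaded
    ceilometer configuration and a list of Sample objects)::

        from oslo_utils import netutils

        url = netutils.urlsplit('zaqar://?queue=meter_queue&ttl=1200')
        publisher = ZaqarPublisher(conf, url)
        # publisher.queue_name == 'meter_queue', publisher.ttl == 1200
        publisher.publish_samples(samples)

    Each sample is posted as a Zaqar message of the form
    ``{'body': sample.as_dict(), 'ttl': publisher.ttl}``.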
""" def __init__(self, conf, parsed_url): super(ZaqarPublisher, self).__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) self.queue_name = options.get('queue', [None])[0] if not self.queue_name: raise ValueError('Must specify a queue in the zaqar publisher') self.ttl = int(options.pop('ttl', [DEFAULT_TTL])[0]) self._client = None @property def client(self): if self._client is None: session = keystone_client.get_session( self.conf, group=self.conf.zaqar.auth_section) self._client = zaqarclient.Client(session=session) return self._client def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ queue = self.client.queue(self.queue_name) messages = [{'body': sample.as_dict(), 'ttl': self.ttl} for sample in samples] queue.post(messages) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ queue = self.client.queue(self.queue_name) messages = [{'body': event.serialize(), 'ttl': self.ttl} for event in events] queue.post(messages) ceilometer-10.0.0/ceilometer/publisher/file.py0000666000175100017510000000714013236733243021374 0ustar zuulzuul00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import logging.handlers from oslo_log import log from six.moves.urllib import parse as urlparse from ceilometer import publisher LOG = log.getLogger(__name__) class FilePublisher(publisher.ConfigPublisherBase): """Publisher metering data to file. The file publisher pushes metering data into a file. The file name and location should be configured in ceilometer pipeline configuration file. If a file name and location is not specified, this File Publisher will not log any meters other than log a warning in Ceilometer log file. To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - file:///var/test?max_bytes=10000000&backup_count=5 File path is required for this publisher to work properly. If max_bytes or backup_count is missing, FileHandler will be used to save the metering data. If max_bytes and backup_count are present, RotatingFileHandler will be used to save the metering data. 
""" def __init__(self, conf, parsed_url): super(FilePublisher, self).__init__(conf, parsed_url) self.publisher_logger = None path = parsed_url.path if not path: LOG.error('The path for the file publisher is required') return rfh = None max_bytes = 0 backup_count = 0 # Handling other configuration options in the query string if parsed_url.query: params = urlparse.parse_qs(parsed_url.query) if params.get('max_bytes') and params.get('backup_count'): try: max_bytes = int(params.get('max_bytes')[0]) backup_count = int(params.get('backup_count')[0]) except ValueError: LOG.error('max_bytes and backup_count should be ' 'numbers.') return # create rotating file handler rfh = logging.handlers.RotatingFileHandler( path, encoding='utf8', maxBytes=max_bytes, backupCount=backup_count) self.publisher_logger = logging.Logger('publisher.file') self.publisher_logger.propagate = False self.publisher_logger.setLevel(logging.INFO) rfh.setLevel(logging.INFO) self.publisher_logger.addHandler(rfh) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ if self.publisher_logger: for sample in samples: self.publisher_logger.info(sample.as_dict()) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ if self.publisher_logger: for event in events: self.publisher_logger.info(event.as_dict()) ceilometer-10.0.0/ceilometer/publisher/__init__.py0000666000175100017510000000307213236733243022214 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp. # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_utils import netutils import six from stevedore import driver def get_publisher(conf, url, namespace): """Get publisher driver and load it. :param url: URL for the publisher :param namespace: Namespace to use to look for drivers. """ parse_result = netutils.urlsplit(url) loaded_driver = driver.DriverManager(namespace, parse_result.scheme) if issubclass(loaded_driver.driver, ConfigPublisherBase): return loaded_driver.driver(conf, parse_result) else: return loaded_driver.driver(parse_result) @six.add_metaclass(abc.ABCMeta) class ConfigPublisherBase(object): """Base class for plugins that publish data.""" def __init__(self, conf, parsed_url): self.conf = conf @abc.abstractmethod def publish_samples(self, samples): """Publish samples into final conduit.""" @abc.abstractmethod def publish_events(self, events): """Publish events into final conduit.""" ceilometer-10.0.0/ceilometer/publisher/utils.py0000666000175100017510000001233013236733243021612 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils for publishers """ import hashlib import hmac from oslo_config import cfg from oslo_utils import secretutils import six OPTS = [ cfg.StrOpt('telemetry_secret', secret=True, default='change this for valid signing', help='Secret value for signing messages. Set value empty if ' 'signing is not required to avoid computational overhead.', deprecated_opts=[cfg.DeprecatedOpt("metering_secret", "DEFAULT"), cfg.DeprecatedOpt("metering_secret", "publisher_rpc"), cfg.DeprecatedOpt("metering_secret", "publisher")] ), ] def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(six.iteritems(input)): temp[decode_unicode(key)] = decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. return [decode_unicode(element) for element in input] elif isinstance(input, six.text_type): return input.encode('utf-8') elif six.PY3 and isinstance(input, six.binary_type): return input.decode('utf-8') else: return input def recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(six.iteritems(d)): if isinstance(value, dict): for subname, subvalue in recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, decode_unicode(value) else: yield name, value def compute_signature(message, secret): """Return the signature for a message dictionary.""" if not secret: return '' if isinstance(secret, six.text_type): secret = secret.encode('utf-8') digest_maker = hmac.new(secret, b'', hashlib.sha256) for name, value in recursive_keypairs(message): if name == 'message_signature': # Skip any existing signature value, which would not have # been part of the original message. continue digest_maker.update(six.text_type(name).encode('utf-8')) digest_maker.update(six.text_type(value).encode('utf-8')) return digest_maker.hexdigest() def verify_signature(message, secret): """Check the signature in the message. Message is verified against the value computed from the rest of the contents. """ if not secret: return True old_sig = message.get('message_signature', '') new_sig = compute_signature(message, secret) if isinstance(old_sig, six.text_type): try: old_sig = old_sig.encode('ascii') except UnicodeDecodeError: return False if six.PY3: new_sig = new_sig.encode('ascii') return secretutils.constant_time_compare(new_sig, old_sig) def meter_message_from_counter(sample, secret): """Make a metering message ready to be published or stored. Returns a dictionary containing a metering message for a notification message and a Sample instance. 
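    A minimal usage sketch, with illustrative names for the sample and
    configuration objects::

        msg = meter_message_from_counter(
            sample, conf.publisher.telemetry_secret)
        assert verify_signature(msg, conf.publisher.telemetry_secret)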
""" msg = {'source': sample.source, 'counter_name': sample.name, 'counter_type': sample.type, 'counter_unit': sample.unit, 'counter_volume': sample.volume, 'user_id': sample.user_id, 'project_id': sample.project_id, 'resource_id': sample.resource_id, 'timestamp': sample.timestamp, 'resource_metadata': sample.resource_metadata, 'message_id': sample.id, 'monotonic_time': sample.monotonic_time, } msg['message_signature'] = compute_signature(msg, secret) return msg def message_from_event(event, secret): """Make an event message ready to be published or stored. Returns a serialized model of Event containing an event message """ msg = event.serialize() msg['message_signature'] = compute_signature(msg, secret) return msg ceilometer-10.0.0/ceilometer/publisher/udp.py0000666000175100017510000000557013236733243021252 0ustar zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using an UDP mechanism """ import socket import msgpack from oslo_log import log from oslo_utils import netutils import ceilometer from ceilometer.i18n import _ from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) class UDPPublisher(publisher.ConfigPublisherBase): def __init__(self, conf, parsed_url): super(UDPPublisher, self).__init__(conf, parsed_url) self.host, self.port = netutils.parse_host_port( parsed_url.netloc, default_port=4952) addrinfo = None try: addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6, socket.SOCK_DGRAM)[0] except socket.gaierror: try: addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET, socket.SOCK_DGRAM)[0] except socket.gaierror: pass if addrinfo: addr_family = addrinfo[0] else: LOG.warning( "Cannot resolve host %s, creating AF_INET socket...", self.host) addr_family = socket.AF_INET self.socket = socket.socket(addr_family, socket.SOCK_DGRAM) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ for sample in samples: msg = utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret) host = self.host port = self.port LOG.debug("Publishing sample %(msg)s over UDP to " "%(host)s:%(port)d", {'msg': msg, 'host': host, 'port': port}) try: self.socket.sendto(msgpack.dumps(msg), (self.host, self.port)) except Exception as e: LOG.warning(_("Unable to send sample over UDP")) LOG.exception(e) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ceilometer-10.0.0/ceilometer/publisher/messaging.py0000666000175100017510000002235213236733243022434 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using the preferred RPC mechanism. """ import abc import itertools import operator import threading from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import encodeutils from oslo_utils import excutils import six import six.moves.urllib.parse as urlparse from ceilometer.i18n import _ from ceilometer import messaging from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) NOTIFIER_OPTS = [ cfg.StrOpt('metering_topic', default='metering', help='The topic that ceilometer uses for metering ' 'notifications.', ), cfg.StrOpt('event_topic', default='event', help='The topic that ceilometer uses for event ' 'notifications.', ), cfg.StrOpt('telemetry_driver', default='messagingv2', help='The driver that ceilometer uses for metering ' 'notifications.', deprecated_name='metering_driver', ) ] class DeliveryFailure(Exception): def __init__(self, message=None, cause=None): super(DeliveryFailure, self).__init__(message) self.cause = cause def raise_delivery_failure(exc): excutils.raise_with_cause(DeliveryFailure, encodeutils.exception_to_unicode(exc), cause=exc) @six.add_metaclass(abc.ABCMeta) class MessagingPublisher(publisher.ConfigPublisherBase): def __init__(self, conf, parsed_url): super(MessagingPublisher, self).__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) # the value of options is a list of url param values # only take care of the latest one if the option # is provided more than once self.per_meter_topic = bool(int( options.get('per_meter_topic', [0])[-1])) self.policy = options.get('policy', ['default'])[-1] self.max_queue_length = int(options.get( 'max_queue_length', [1024])[-1]) self.max_retry = 0 self.queue_lock = threading.Lock() self.local_queue = [] if self.policy in ['default', 'queue', 'drop']: LOG.info('Publishing policy set to %s', self.policy) else: LOG.warning(_('Publishing policy is unknown (%s) force to ' 'default'), self.policy) self.policy = 'default' self.retry = 1 if self.policy in ['queue', 'drop'] else None def publish_samples(self, samples): """Publish samples on RPC. :param samples: Samples from pipeline after transformation. """ meters = [ utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret) for sample in samples ] topic = self.conf.publisher_notifier.metering_topic self.local_queue.append((topic, meters)) if self.per_meter_topic: for meter_name, meter_list in itertools.groupby( sorted(meters, key=operator.itemgetter('counter_name')), operator.itemgetter('counter_name')): meter_list = list(meter_list) topic_name = topic + '.' 
+ meter_name LOG.debug('Publishing %(m)d samples on %(n)s', {'m': len(meter_list), 'n': topic_name}) self.local_queue.append((topic_name, meter_list)) self.flush() def flush(self): with self.queue_lock: queue = self.local_queue self.local_queue = [] queue = self._process_queue(queue, self.policy) with self.queue_lock: self.local_queue = (queue + self.local_queue) if self.policy == 'queue': self._check_queue_length() def _check_queue_length(self): queue_length = len(self.local_queue) if queue_length > self.max_queue_length > 0: count = queue_length - self.max_queue_length self.local_queue = self.local_queue[count:] LOG.warning(_("Publisher max local_queue length is exceeded, " "dropping %d oldest samples") % count) def _process_queue(self, queue, policy): current_retry = 0 while queue: topic, data = queue[0] try: self._send(topic, data) except DeliveryFailure: data = sum([len(m) for __, m in queue]) if policy == 'queue': LOG.warning(_("Failed to publish %d datapoints, queue " "them"), data) return queue elif policy == 'drop': LOG.warning(_("Failed to publish %d datapoints, " "dropping them"), data) return [] current_retry += 1 if current_retry >= self.max_retry: LOG.exception("Failed to retry to send sample data " "with max_retry times") raise else: queue.pop(0) return [] def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ ev_list = [utils.message_from_event( event, self.conf.publisher.telemetry_secret) for event in events] topic = self.conf.publisher_notifier.event_topic self.local_queue.append((topic, ev_list)) self.flush() @abc.abstractmethod def _send(self, topic, meters): """Send the meters to the messaging topic.""" class NotifierPublisher(MessagingPublisher): """Publish metering data from notifier publisher. The ip address and port number of notifier can be configured in ceilometer pipeline configuration file. User can customize the transport driver such as rabbit, kafka and so on. The Notifier uses `sample` method as default method to send notifications. This publisher has transmit options such as queue, drop, and retry. These options are specified using policy field of URL parameter. When queue option could be selected, local queue length can be determined using max_queue_length field as well. When the transfer fails with retry option, try to resend the data as many times as specified in max_retry field. If max_retry is not specified, by default the number of retry is 100. 
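    For example (values are purely illustrative),
    policy=queue&max_queue_length=2048 keeps at most the 2048 most recent
    undelivered batches in the local queue, while max_retry=5 under the
    default policy gives up and raises after five consecutive delivery
    failures.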
To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: meter: - name: meter_notifier meters: - "*" sinks: - notifier_sink sinks: - name: notifier_sink transformers: publishers: - notifier://[notifier_ip]:[notifier_port]?topic=[topic]& driver=driver&max_retry=100 """ def __init__(self, conf, parsed_url, default_topic): super(NotifierPublisher, self).__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) topics = options.pop('topic', [default_topic]) driver = options.pop('driver', ['rabbit'])[0] self.max_retry = int(options.get('max_retry', [100])[-1]) url = None if parsed_url.netloc != '': url = urlparse.urlunsplit([driver, parsed_url.netloc, parsed_url.path, urlparse.urlencode(options, True), parsed_url.fragment]) self.notifier = oslo_messaging.Notifier( messaging.get_transport(self.conf, url), driver=self.conf.publisher_notifier.telemetry_driver, publisher_id='telemetry.publisher.%s' % self.conf.host, topics=topics, retry=self.retry ) def _send(self, event_type, data): try: self.notifier.sample({}, event_type=event_type, payload=data) except oslo_messaging.MessageDeliveryFailure as e: raise_delivery_failure(e) class SampleNotifierPublisher(NotifierPublisher): def __init__(self, conf, parsed_url): super(SampleNotifierPublisher, self).__init__( conf, parsed_url, conf.publisher_notifier.metering_topic) class EventNotifierPublisher(NotifierPublisher): def __init__(self, conf, parsed_url): super(EventNotifierPublisher, self).__init__( conf, parsed_url, conf.publisher_notifier.event_topic) ceilometer-10.0.0/ceilometer/publisher/test.py0000666000175100017510000000256213236733243021437 0ustar zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample in memory, useful for testing """ from ceilometer import publisher class TestPublisher(publisher.ConfigPublisherBase): """Publisher used in unit testing.""" def __init__(self, conf, parsed_url): super(TestPublisher, self).__init__(conf, parsed_url) self.samples = [] self.events = [] self.calls = 0 def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ self.samples.extend(samples) self.calls += 1 def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ self.events.extend(events) self.calls += 1 ceilometer-10.0.0/ceilometer/publisher/gnocchi.py0000666000175100017510000005100113236733243022062 0ustar zuulzuul00000000000000# # Copyright 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict import hashlib import itertools import json import operator import pkg_resources import threading import uuid from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import exceptions as ka_exceptions from oslo_log import log from oslo_utils import fnmatch from oslo_utils import timeutils import six import six.moves.urllib.parse as urlparse from stevedore import extension from ceilometer import declarative from ceilometer import gnocchi_client from ceilometer.i18n import _ from ceilometer import keystone_client from ceilometer import publisher NAME_ENCODED = __name__.encode('utf-8') CACHE_NAMESPACE = uuid.UUID(bytes=hashlib.md5(NAME_ENCODED).digest()) LOG = log.getLogger(__name__) def cache_key_mangler(key): """Construct an opaque cache key.""" if six.PY2: key = key.encode('utf-8') return uuid.uuid5(CACHE_NAMESPACE, key).hex EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete") class ResourcesDefinition(object): MANDATORY_FIELDS = {'resource_type': six.string_types, 'metrics': list} MANDATORY_EVENT_FIELDS = {'id': six.string_types} def __init__(self, definition_cfg, default_archive_policy, plugin_manager): self._default_archive_policy = default_archive_policy self.cfg = definition_cfg self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg) if self.support_events(): self._check_required_and_types(self.MANDATORY_EVENT_FIELDS, self.cfg['event_attributes']) self._attributes = {} for name, attr_cfg in self.cfg.get('attributes', {}).items(): self._attributes[name] = declarative.Definition(name, attr_cfg, plugin_manager) self._event_attributes = {} for name, attr_cfg in self.cfg.get('event_attributes', {}).items(): self._event_attributes[name] = declarative.Definition( name, attr_cfg, plugin_manager) self.metrics = {} for t in self.cfg['metrics']: archive_policy = self.cfg.get('archive_policy', self._default_archive_policy) if archive_policy is None: self.metrics[t] = {} else: self.metrics[t] = dict(archive_policy_name=archive_policy) @staticmethod def _check_required_and_types(expected, definition): for field, field_type in expected.items(): if field not in definition: raise declarative.ResourceDefinitionException( _("Required field %s not specified") % field, definition) if not isinstance(definition[field], field_type): raise declarative.ResourceDefinitionException( _("Required field %(field)s should be a %(type)s") % {'field': field, 'type': field_type}, definition) @staticmethod def _ensure_list(value): if isinstance(value, list): return value return [value] def support_events(self): for e in ["event_create", "event_delete", "event_update"]: if e in self.cfg: return True return False def event_match(self, event_type): for e in self._ensure_list(self.cfg.get('event_create', [])): if fnmatch.fnmatch(event_type, e): return EVENT_CREATE for e in self._ensure_list(self.cfg.get('event_delete', [])): if fnmatch.fnmatch(event_type, e): return EVENT_DELETE for e in self._ensure_list(self.cfg.get('event_update', [])): if fnmatch.fnmatch(event_type, e): return EVENT_UPDATE def sample_attributes(self, sample): attrs = {} 
sample_dict = sample.as_dict() for name, definition in self._attributes.items(): value = definition.parse(sample_dict) if value is not None: attrs[name] = value return attrs def event_attributes(self, event): attrs = {'type': self.cfg['resource_type']} traits = dict([(trait.name, trait.value) for trait in event.traits]) for attr, field in self.cfg.get('event_attributes', {}).items(): value = traits.get(field) if value is not None: attrs[attr] = value return attrs class LockedDefaultDict(defaultdict): """defaultdict with lock to handle threading Dictionary only deletes if nothing is accessing dict and nothing is holding lock to be deleted. If both cases are not true, it will skip delete. """ def __init__(self, *args, **kwargs): self.lock = threading.Lock() super(LockedDefaultDict, self).__init__(*args, **kwargs) def __getitem__(self, key): with self.lock: return super(LockedDefaultDict, self).__getitem__(key) def pop(self, key, *args): with self.lock: key_lock = super(LockedDefaultDict, self).__getitem__(key) if key_lock.acquire(False): try: super(LockedDefaultDict, self).pop(key, *args) finally: key_lock.release() class GnocchiPublisher(publisher.ConfigPublisherBase): """Publisher class for recording metering data into the Gnocchi service. The publisher class records each meter into the gnocchi service configured in Ceilometer pipeline file. An example target may look like the following: gnocchi://?archive_policy=low&filter_project=gnocchi """ def __init__(self, conf, parsed_url): super(GnocchiPublisher, self).__init__(conf, parsed_url) # TODO(jd) allow to override Gnocchi endpoint via the host in the URL options = urlparse.parse_qs(parsed_url.query) self.filter_project = options.get( 'filter_project', [conf.dispatcher_gnocchi.filter_project])[-1] resources_definition_file = options.get( 'resources_definition_file', [conf.dispatcher_gnocchi.resources_definition_file])[-1] archive_policy = options.get( 'archive_policy', [conf.dispatcher_gnocchi.archive_policy])[-1] self.resources_definition = self._load_resources_definitions( conf, archive_policy, resources_definition_file) self.metric_map = dict((metric, rd) for rd in self.resources_definition for metric in rd.metrics) timeout = options.get('timeout', [conf.dispatcher_gnocchi.request_timeout])[-1] self._ks_client = keystone_client.get_client(conf) self.cache = None try: import oslo_cache oslo_cache.configure(conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. 
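                # A cache hit later lets _if_not_cached() skip redundant
                # resource create/update calls against Gnocchi; cache keys
                # are mangled into opaque UUID5 strings by
                # cache_key_mangler() defined above.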
if conf.cache.enabled: cache_region = oslo_cache.create_region() self.cache = oslo_cache.configure_cache_region( conf, cache_region) self.cache.key_mangler = cache_key_mangler except ImportError: pass except oslo_cache.exception.ConfigurationError as exc: LOG.warning('unable to configure oslo_cache: %s', exc) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) self._gnocchi = gnocchi_client.get_gnocchiclient( conf, request_timeout=timeout) self._already_logged_event_types = set() self._already_logged_metric_names = set() @staticmethod def _load_resources_definitions(conf, archive_policy, resources_definition_file): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( conf, {}, resources_definition_file, pkg_resources.resource_filename(__name__, "data/gnocchi_resources.yaml")) resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, archive_policy, plugin_manager)) except Exception as exc: LOG.error("Failed to load resource due to error %s" % exc) return resource_defs @property def gnocchi_project_id(self): if self._gnocchi_project_id is not None: return self._gnocchi_project_id with self._gnocchi_project_id_lock: if self._gnocchi_project_id is None: try: project = self._ks_client.projects.find( name=self.filter_project) except ka_exceptions.NotFound: LOG.warning('filtered project not found in keystone,' ' ignoring the filter_project ' 'option') self.filter_project = None return None except Exception: LOG.exception('fail to retrieve filtered project ') raise self._gnocchi_project_id = project.id LOG.debug("filtered project found: %s", self._gnocchi_project_id) return self._gnocchi_project_id def _is_swift_account_sample(self, sample): try: return (self.metric_map[sample.name].cfg['resource_type'] == 'swift_account') except KeyError: return False def _is_gnocchi_activity(self, sample): return (self.filter_project and self.gnocchi_project_id and ( # avoid anything from the user used by gnocchi sample.project_id == self.gnocchi_project_id or # avoid anything in the swift account used by gnocchi (sample.resource_id == self.gnocchi_project_id and self._is_swift_account_sample(sample)) )) def _get_resource_definition_from_event(self, event_type): for rd in self.resources_definition: operation = rd.event_match(event_type) if operation: return rd, operation def publish_samples(self, data): # NOTE(sileht): skip sample generated by gnocchi itself data = [s for s in data if not self._is_gnocchi_activity(s)] data.sort(key=operator.attrgetter('resource_id')) resource_grouped_samples = itertools.groupby( data, key=operator.attrgetter('resource_id')) gnocchi_data = {} measures = {} for resource_id, samples_of_resource in resource_grouped_samples: # NOTE(sileht): / is forbidden by Gnocchi resource_id = resource_id.replace('/', '_') for sample in samples_of_resource: metric_name = sample.name rd = self.metric_map.get(metric_name) if rd is None: if metric_name not in self._already_logged_metric_names: LOG.warning("metric %s is not handled by Gnocchi" % metric_name) self._already_logged_metric_names.add(metric_name) continue if resource_id not in gnocchi_data: gnocchi_data[resource_id] = { 'resource_type': rd.cfg['resource_type'], 'resource': {"id": resource_id, "user_id": sample.user_id, "project_id": sample.project_id, "metrics": rd.metrics}} gnocchi_data[resource_id].setdefault( 
"resource_extra", {}).update(rd.sample_attributes(sample)) measures.setdefault(resource_id, {}).setdefault( metric_name, []).append({'timestamp': sample.timestamp, 'value': sample.volume}) # TODO(gordc): unit should really be part of metric definition gnocchi_data[resource_id]['resource']['metrics'][ metric_name]['unit'] = sample.unit try: self.batch_measures(measures, gnocchi_data) except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: LOG.error(six.text_type(e), exc_info=True) for info in gnocchi_data.values(): resource = info["resource"] resource_type = info["resource_type"] resource_extra = info["resource_extra"] if not resource_extra: continue try: self._if_not_cached("update", resource_type, resource, self._update_resource, resource_extra) except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: LOG.error(six.text_type(e), exc_info=True) @staticmethod def _extract_resources_from_error(e, resource_infos): resource_ids = set([r['original_resource_id'] for r in e.message['detail']]) return [(resource_infos[rid]['resource_type'], resource_infos[rid]['resource'], resource_infos[rid]['resource_extra']) for rid in resource_ids] def batch_measures(self, measures, resource_infos): # NOTE(sileht): We don't care about error here, we want # resources metadata always been updated try: self._gnocchi.metric.batch_resources_metrics_measures( measures, create_metrics=True) except gnocchi_exc.BadRequest as e: if not isinstance(e.message, dict): raise if e.message.get('cause') != 'Unknown resources': raise resources = self._extract_resources_from_error(e, resource_infos) for resource_type, resource, resource_extra in resources: try: resource.update(resource_extra) self._if_not_cached("create", resource_type, resource, self._create_resource) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass except gnocchi_exc.ClientException as e: LOG.error('Error creating resource %(id)s: %(err)s', {'id': resource['id'], 'err': six.text_type(e)}) # We cannot post measures for this resource # and we can't patch it later del measures[resource['id']] del resource_infos[resource['id']] # NOTE(sileht): we have created missing resources/metrics, # now retry to post measures self._gnocchi.metric.batch_resources_metrics_measures( measures, create_metrics=True) LOG.debug( "%d measures posted against %d metrics through %d resources", sum(len(m) for rid in measures for m in measures[rid].values()), sum(len(m) for m in measures.values()), len(resource_infos)) def _create_resource(self, resource_type, resource): self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) def _update_resource(self, resource_type, resource, resource_extra): self._gnocchi.resource.update(resource_type, resource["id"], resource_extra) LOG.debug('Resource %s updated', resource["id"]) def _if_not_cached(self, operation, resource_type, resource, method, *args, **kwargs): if self.cache: cache_key = resource['id'] attribute_hash = self._check_resource_cache(cache_key, resource) hit = False if attribute_hash: with self._gnocchi_resource_lock[cache_key]: # NOTE(luogangyi): there is a possibility that the # resource was already built in cache by another # ceilometer-notification-agent when we get the lock here. 
attribute_hash = self._check_resource_cache(cache_key, resource) if attribute_hash: method(resource_type, resource, *args, **kwargs) self.cache.set(cache_key, attribute_hash) else: hit = True LOG.debug('resource cache recheck hit for ' '%s %s', operation, cache_key) self._gnocchi_resource_lock.pop(cache_key, None) else: hit = True LOG.debug('Resource cache hit for %s %s', operation, cache_key) if hit and operation == "create": raise gnocchi_exc.ResourceAlreadyExists() else: method(resource_type, resource, *args, **kwargs) def _check_resource_cache(self, key, resource_data): cached_hash = self.cache.get(key) attribute_hash = hash(tuple(i for i in resource_data.items() if i[0] != 'metrics')) if not cached_hash or cached_hash != attribute_hash: return attribute_hash else: return None def publish_events(self, events): for event in events: rd = self._get_resource_definition_from_event(event.event_type) if not rd: if event.event_type not in self._already_logged_event_types: LOG.debug("No gnocchi definition for event type: %s", event.event_type) self._already_logged_event_types.add(event.event_type) continue rd, operation = rd if operation == EVENT_DELETE: self._delete_event(rd, event) def _delete_event(self, rd, event): ended_at = timeutils.utcnow().isoformat() resource = rd.event_attributes(event) associated_resources = rd.cfg.get('event_associated_resources', {}) if associated_resources: to_end = itertools.chain([resource], *[ self._search_resource(resource_type, query % resource['id']) for resource_type, query in associated_resources.items() ]) else: to_end = [resource] for resource in to_end: self._set_ended_at(resource, ended_at) def _search_resource(self, resource_type, query): try: return self._gnocchi.resource.search( resource_type, json.loads(query)) except Exception: LOG.error("Fail to search resource type %{resource_type}s " "with '%{query}s'", {'resource_type': resource_type, 'query': query}, exc_info=True) return [] def _set_ended_at(self, resource, ended_at): try: self._gnocchi.resource.update(resource['type'], resource['id'], {'ended_at': ended_at}) except gnocchi_exc.ResourceNotFound: LOG.debug("Delete event received on unexisting resource (%s), " "ignore it.", resource['id']) except Exception: LOG.error("Fail to update the resource %s", resource, exc_info=True) LOG.debug('Resource %s ended at %s' % (resource["id"], ended_at)) ceilometer-10.0.0/ceilometer/gnocchi_client.py0000666000175100017510000002366713236733243021444 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
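# Helper module for talking to Gnocchi: get_gnocchiclient() builds an
# authenticated client from the credentials named by the [gnocchi]
# auth_section option, while resources_initial and
# resources_update_operations describe the resource types that
# upgrade_resource_types() creates or migrates during an upgrade.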
from distutils import version from gnocchiclient import client from gnocchiclient import exceptions as gnocchi_exc import keystoneauth1.session from oslo_log import log from ceilometer import keystone_client LOG = log.getLogger(__name__) def get_gnocchiclient(conf, request_timeout=None): group = conf.gnocchi.auth_section session = keystone_client.get_session(conf, group=group, timeout=request_timeout) adapter = keystoneauth1.session.TCPKeepAliveAdapter( pool_maxsize=conf.max_parallel_requests) session.mount("http://", adapter) session.mount("https://", adapter) return client.Client( '1', session, adapter_options={'connect_retries': 3, 'interface': conf[group].interface, 'region_name': conf[group].region_name}) # NOTE(sileht): This is the initial resource types created in Gnocchi # This list must never change to keep in sync with what Gnocchi early # database contents was containing resources_initial = { "image": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "container_format": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "disk_format": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, }, "instance": { "flavor_id": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "image_ref": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "host": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "display_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "server_group": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "instance_disk": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "instance_id": {"type": "uuid", "required": True}, }, "instance_network_interface": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "instance_id": {"type": "uuid", "required": True}, }, "volume": { "display_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "swift_account": {}, "ceph_account": {}, "network": {}, "identity": {}, "ipmi": {}, "stack": {}, "host": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, }, "host_network_interface": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "device_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "host_disk": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "device_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, } # NOTE(sileht): Order matter this have to be considered like alembic migration # code, because it updates the resources schema of Gnocchi resources_update_operations = [ {"desc": "add volume_type to volume", "type": "update_attribute_type", "resource_type": "volume", "data": [{ "op": "add", "path": "/attributes/volume_type", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} }]}, {"desc": "add flavor_name to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "add", "path": "/attributes/flavor_name", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": True, "options": {'fill': ''}} }]}, {"desc": "add nova_compute resource type", "type": "create_resource_type", "resource_type": "nova_compute", "data": [{ "attributes": {"host_name": {"type": "string", "min_length": 0, 
"max_length": 255, "required": True}} }]}, {"desc": "add manila share type", "type": "create_resource_type", "resource_type": "manila_share", "data": [{ "attributes": {"name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "host": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "protocol": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "availability_zone": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "status": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add switch resource type", "type": "create_resource_type", "resource_type": "switch", "data": [{ "attributes": {"controller": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add switch_port resource type", "type": "create_resource_type", "resource_type": "switch_port", "data": [{ "attributes": {"switch": {"type": "string", "min_length": 0, "max_length": 64, "required": True}, "port_number_on_switch": {"type": "number", "min": 0, "max": 4294967295, "required": False}, "neutron_port_id": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "controller": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add port resource type", "type": "create_resource_type", "resource_type": "port", "data": [{ "attributes": {"controller": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add switch_table resource type", "type": "create_resource_type", "resource_type": "switch_table", "data": [{ "attributes": {"switch": {"type": "string", "min_length": 0, "max_length": 64, "required": True}, "controller": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add volume provider resource type", "type": "create_resource_type", "resource_type": "volume_provider", "data": [{ "attributes": {} }]}, {"desc": "add volume provider pool resource type", "type": "create_resource_type", "resource_type": "volume_provider_pool", "data": [{ "attributes": {"provider": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, ] # NOTE(sileht): We use LooseVersion because pbr can generate invalid # StrictVersion like 9.0.1.dev226 REQUIRED_VERSION = version.LooseVersion("4.0.0") def upgrade_resource_types(conf): gnocchi = get_gnocchiclient(conf) gnocchi_version = version.LooseVersion(gnocchi.build.get()) if gnocchi_version < REQUIRED_VERSION: raise Exception("required gnocchi version is %s, got %s", REQUIRED_VERSION, gnocchi_version) for name, attributes in resources_initial.items(): try: gnocchi.resource_type.get(name=name) except gnocchi_exc.ResourceTypeNotFound: rt = {'name': name, 'attributes': attributes} gnocchi.resource_type.create(resource_type=rt) for ops in resources_update_operations: if ops['type'] == 'update_attribute_type': rt = gnocchi.resource_type.get(name=ops['resource_type']) first_op = ops['data'][0] attrib = first_op['path'].replace('/attributes', '') if first_op['op'] == 'add' and attrib in rt['attributes']: continue if first_op['op'] == 'remove' and attrib not in rt['attributes']: continue gnocchi.resource_type.update(ops['resource_type'], ops['data']) elif ops['type'] == 'create_resource_type': try: gnocchi.resource_type.get(name=ops['resource_type']) except gnocchi_exc.ResourceTypeNotFound: rt = {'name': ops['resource_type'], 'attributes': ops['data'][0]['attributes']} 
gnocchi.resource_type.create(resource_type=rt) ceilometer-10.0.0/ceilometer/middleware.py0000666000175100017510000000245613236733243020602 0ustar zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.pipeline import sample as endpoint from ceilometer import sample class HTTPRequest(endpoint.SampleEndpoint): event_types = ['http.request'] def build_sample(self, message): yield sample.Sample.from_notification( name=message['event_type'], type=sample.TYPE_DELTA, volume=1, unit=message['event_type'].split('.')[1], user_id=message['payload']['request'].get('HTTP_X_USER_ID'), project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), resource_id=message['payload']['request'].get( 'HTTP_X_SERVICE_NAME'), message=message) class HTTPResponse(HTTPRequest): event_types = ['http.response'] ceilometer-10.0.0/ceilometer/agent.py0000666000175100017510000001160413236733243017556 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp. # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import os import pkg_resources from oslo_log import log from oslo_utils import fnmatch import six import yaml LOG = log.getLogger(__name__) class ConfigException(Exception): def __init__(self, cfg_type, message, cfg): self.cfg_type = cfg_type self.msg = message self.cfg = cfg def __str__(self): return '%s %s: %s' % (self.cfg_type, self.cfg, self.msg) class SourceException(Exception): def __init__(self, message, cfg): self.msg = message self.cfg = cfg def __str__(self): return 'Source definition invalid: %s (%s)' % (self.msg, self.cfg) class ConfigManagerBase(object): """Base class for managing configuration file refresh""" def __init__(self, conf): self.conf = conf self.cfg_loc = None def load_config(self, cfg_file, fallback_cfg_prefix='pipeline/data/'): """Load a configuration file and set its refresh values.""" if os.path.exists(cfg_file): self.cfg_loc = cfg_file else: self.cfg_loc = self.conf.find_file(cfg_file) if not self.cfg_loc and fallback_cfg_prefix is not None: LOG.debug("No pipeline definitions configuration file found! 
" "Using default config.") self.cfg_loc = pkg_resources.resource_filename( __name__, fallback_cfg_prefix + cfg_file) with open(self.cfg_loc) as fap: data = fap.read() conf = yaml.safe_load(data) self.cfg_mtime = self.get_cfg_mtime() self.cfg_hash = self.get_cfg_hash() LOG.info("Config file: %s", conf) return conf def get_cfg_mtime(self): """Return modification time of cfg file""" return os.path.getmtime(self.cfg_loc) if self.cfg_loc else None def get_cfg_hash(self): """Return hash of configuration file""" if not self.cfg_loc: return None with open(self.cfg_loc) as fap: data = fap.read() if six.PY3: data = data.encode('utf-8') file_hash = hashlib.md5(data).hexdigest() return file_hash def cfg_changed(self): """Returns hash of changed cfg else False.""" mtime = self.get_cfg_mtime() if mtime > self.cfg_mtime: LOG.info('Configuration file has been updated.') self.cfg_mtime = mtime _hash = self.get_cfg_hash() if _hash != self.cfg_hash: LOG.info("Detected change in configuration.") return _hash return False class Source(object): """Represents a generic source""" def __init__(self, cfg): self.cfg = cfg try: self.name = cfg['name'] except KeyError as err: raise SourceException( "Required field %s not specified" % err.args[0], cfg) def __str__(self): return self.name def check_source_filtering(self, data, d_type): """Source data rules checking - At least one meaningful datapoint exist - Included type and excluded type can't co-exist on the same pipeline - Included type meter and wildcard can't co-exist at same pipeline """ if not data: raise SourceException('No %s specified' % d_type, self.cfg) if ([x for x in data if x[0] not in '!*'] and [x for x in data if x[0] == '!']): raise SourceException( 'Both included and excluded %s specified' % d_type, self.cfg) if '*' in data and [x for x in data if x[0] not in '!*']: raise SourceException( 'Included %s specified with wildcard' % d_type, self.cfg) @staticmethod def is_supported(dataset, data_name): # Support wildcard like storage.* and !disk.* # Start with negation, we consider that the order is deny, allow if any(fnmatch.fnmatch(data_name, datapoint[1:]) for datapoint in dataset if datapoint[0] == '!'): return False if any(fnmatch.fnmatch(data_name, datapoint) for datapoint in dataset if datapoint[0] != '!'): return True # if we only have negation, we suppose the default is allow return all(datapoint.startswith('!') for datapoint in dataset) ceilometer-10.0.0/ceilometer/locale/0000775000175100017510000000000013236733440017340 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/es/0000775000175100017510000000000013236733440017747 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/es/LC_MESSAGES/0000775000175100017510000000000013236733440021534 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/es/LC_MESSAGES/ceilometer.po0000666000175100017510000001650513236733243024236 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Rafael Rivero , 2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Spanish\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "El transformador aritmĂ©tico debe utilizar al menos un medidor en la " "expresiĂłn '%s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "No se puede conectar a XenAPI: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando la notificaciĂłn %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Error de libvirt al buscar la instancia : [CĂłdigo " "de error %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "La expresiĂłn se ha evaluado en un valor NaN." #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "No se han podido analizar los datos de la instancia , el estado del dominio es SHUTOFF." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "No se han podido publicar los puntos de datos %d, descartándolos" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "No se han podido publicar los puntos de datos %d, pĂłngalos en cola" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxis de YAML no válida en archivo de definiciones %(file)s en la lĂ­nea: " "%(line)s, columna: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Se han encontrado varias VM %s en XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Debe especificar el url_conexiĂłn y la contraseña_conexiĂłn para utilizar" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "No hay ningĂşn plug-in denominado %(plugin)s disponible para %(name)s" msgid "Node Manager init failed" msgstr "El inicio de Gestor de nodos ha fallado" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Error de análisis en especificaciĂłn de JSONPath '%(jsonpath)s' para " "%(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "" "Se ha especificado un plug-in, pero no se ha proporcionado ningĂşn nombre de " "plug-in para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "El sondeo %(name)s ha fallado %(cnt)s veces." #, python-format msgid "Pollster for %s is disabled!" msgstr "¡El Pollster para %s está inhabilitado!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Se supera la longitud máxima de aplicaciĂłn de publicaciĂłn local_queue, " "descartando los ejemplos más antiguos %d" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "No se conoce la polĂ­tica de publicaciĂłn (%s) forzar para tomar el valor " "predeterminado" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo necesario %s no especificado" #, python-format msgid "The field 'fields' is required for %s" msgstr "El campo 'campos' es obligatorio para %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "No se puede evaluar la expresiĂłn %(expr)s: %(exc)s" msgid "Unable to send sample over UDP" msgstr "No se ha podido enviar una muestra sobre UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga " "%(id)s, se omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el " "ejemplo" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en la agrupaciĂłn %(id)s, se " "omitirá el ejemplo" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en vip %(id)s, se omitirá el " "ejemplo" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el " "ejemplo" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s no se ha encontrado en VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "No se han encontrado VM %s en XenServer" msgid "Wrong sensor type" msgstr "Tipo de sensor incorrecto" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Error de YAML al leer el archivo de definiciones %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "saliendo del ejemplo de orden de tiempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "eliminando la muestra sin predecesor: %s" msgid "ipmitool output length mismatch" msgstr "la longitud de salida de ipmitool no coincide" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningĂşn " "dato de la entrada" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido" msgid "running ipmitool failure" msgstr "fallo de ejecuciĂłn de ipmitool" 
ceilometer-10.0.0/ceilometer/locale/fr/0000775000175100017510000000000013236733440017747 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/fr/LC_MESSAGES/0000775000175100017510000000000013236733440021534 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po0000666000175100017510000002005213236733243024226 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Corinne Verheyde , 2013 # CHABERT Loic , 2013 # Christophe kryskool , 2013 # Corinne Verheyde , 2013-2014 # EVEILLARD , 2013-2014 # Francesco Vollero , 2015 # Jonathan Dupart , 2014 # CHABERT Loic , 2013 # Maxime COQUEREL , 2014 # Nick Barcet , 2013 # Nick Barcet , 2013 # Andrew Melim , 2014 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # RĂ©mi Le Trocquer , 2014 # EVEILLARD , 2013 # Corinne Verheyde , 2013 # Corinne Verheyde , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: French\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Le transformateur arithmĂ©tique doit utiliser au moins un mètre dans " "l'expression '%s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Connexion impossible XenAPI: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de la recherche de l'instance : [Code d'erreur %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "Expression Ă©valuĂ©e avec une valeur not-a-number !" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Echec de l'inspection des donnĂ©es de l'instance . " "Le domaine est Ă  l'Ă©tat SHUTOFF (INTERRUPTION)." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Echec de la publication des points de donnĂ©es %d. Suppression en cours" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "Echec de la publication des points de donnĂ©es %d. Mettez-les en file " "d'attente" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Syntaxe YAML non valide dans le fichier de dĂ©finitions %(file)s Ă  la ligne : " "%(line)s, colonne : %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Plusieurs machines virtuelles %s trouvĂ©es dans XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "Il faut indiquer connection_url et connection_password pour utiliser" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Aucun plugin nommĂ© %(plugin)s n'est disponible pour %(name)s" msgid "Node Manager init failed" msgstr "Echec de l'initialisation du gestionnaire de noeud" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "L'API Opencontrail a renvoyĂ© %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erreur d'analyse dans la spĂ©cification JSONPath '%(jsonpath)s' pour " "%(name)s : %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plugin spĂ©cifiĂ©, mais aucun nom de plugin n'est fourni pour %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "L'interrogation du capteur %(mtr)s a Ă©chouĂ© %(cnt)s fois !" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Sondage de %(name)s %(cnt)s fois en Ă©chec!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Le pollster pour %s est dĂ©sactivĂ© !" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La longueur maximale de local_queue du diffuseur est dĂ©passĂ©e, suppression " "des %d Ă©chantillons les plus anciens" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "La politique de publication est inconnue (%s) forcĂ© le dĂ©faut" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API AdminOps RGW a renvoyĂ© %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Champ requis %s non spĂ©cifiĂ©e" #, python-format msgid "The field 'fields' is required for %s" msgstr "Le champ 'fields' est requis pour %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Impossible d'Ă©valuer l'expression %(expr)s : %(exc)s" msgid "Unable to send sample over UDP" msgstr "Impossible d'envoyer l'Ă©chantillon en UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Statut %(stat)s inconnu reçu sur le Load Balancer %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le listener %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le membre %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le pool %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Etat %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "Unknown status 
%(stat)s received on vpn %(id)s, skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, Ă©chantillon ignorĂ©" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "La machine virtuelle %s est introuvable dans VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s non trouvĂ© dans XenServer" msgid "Wrong sensor type" msgstr "Type de dĂ©tecteur incorrect" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erreur YAML lors de la lecture du fichier de dĂ©finitions %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "suppression de l'exemple de classement dans le temps : %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "abandon de l'Ă©chantillon sans prĂ©dĂ©cesseur: %s" msgid "ipmitool output length mismatch" msgstr "Non-concordance de longueur de la sortie ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Echec de l'analyse des donnĂ©es du dĂ©tecteur IPMI, aucune donnĂ©e extraite Ă  " "partir de l'entrĂ©e fournie" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "Echec de l'analyse des donnĂ©es du dĂ©tecteur IPMI, type de dĂ©tecteur inconnu" msgid "running ipmitool failure" msgstr "Echec d'exĂ©cution d'ipmitool" ceilometer-10.0.0/ceilometer/locale/it/0000775000175100017510000000000013236733440017754 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/it/LC_MESSAGES/0000775000175100017510000000000013236733440021541 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/it/LC_MESSAGES/ceilometer.po0000666000175100017510000001615013236733243024237 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Stefano Maffulli , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Italian\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Il trasformatore aritmetico deve utilizzare almeno un contatore " "nell'espressione '%s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Impossibile connettersi a XenAPI: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "Espressione valutata a un valore NaN!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Impossibile ispezionare i dati dell'istanza , " "stato dominio SHUTOFF." 
#, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Impossibile pubblicare %d datapoint, eliminati" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " "%(line)s, colonna: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "" "Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "PiĂą VM %s trovate in XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Ă necessario specificare connection_url e connection_password da utilizzare" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" msgid "Node Manager init failed" msgstr "Inizializzazione gestore nodi non riuscita" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "L'API Opencontrail ha restituito %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster per %s disabilitato!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La lunghezza local_queue massima del publisher è stata superata, " "eliminazione di esempi %d meno recenti" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "La politica di pubblicazione è sconosciuta (%s), applicazione del valore " "predefinito" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo richiesto %s non specificato" #, python-format msgid "The field 'fields' is required for %s" msgstr "Il campo 'fields' è obbligatorio per %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Impossibile valutare l'espressione %(expr)s: %(exc)s" msgid "Unable to send sample over UDP" msgstr "Impossibile inviare l'esempio su UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su bilanciatore del carico %(id)s, " "ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su listener %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su membro %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto sul pool %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vip %(id)s, ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s non trovata in VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s non trovata in XenServer" msgid "Wrong sensor type" msgstr "Tipo di sensore errato" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Errore YAML durante la lettura del file definizioni %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "rilascio campione ordinamento fuori tempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "eliminazione in corso dell'esempio senza predecessore: %s" msgid "ipmitool output length mismatch" msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " "dall'input fornito" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" msgid "running ipmitool failure" msgstr "errore nell'esecuzione ipmitool" ceilometer-10.0.0/ceilometer/locale/de/0000775000175100017510000000000013236733440017730 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/de/LC_MESSAGES/0000775000175100017510000000000013236733440021515 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/de/LC_MESSAGES/ceilometer.po0000666000175100017510000001654413236733243024222 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Carsten Duch , 2014 # Christian Berendt , 2014 # Ettore Atalan , 2014 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: German\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' " "verwenden" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt während Suche nach Instanz : " "[Fehlercode %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "Ausdruck ergab einen NaN-Wert!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Fehler beim ĂśberprĂĽfen von Daten der Instanz , " "Domänenstatus ist ABGESCHALTET." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange " "einreihen" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "UngĂĽltige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, " "Spalte: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "UngĂĽltiger Traittyp '%(type)s' fĂĽr Trait %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Mehrere VMs %s in XenServer gefunden" msgid "Must specify connection_url, and connection_password to use" msgstr "" "Angabe von connection_url und connection_password fĂĽr die Verwendung " "erforderlich" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Kein Plug-in mit dem Namen %(plugin)s verfĂĽgbar fĂĽr %(name)s." 
msgid "Node Manager init failed" msgstr "Initialisierung von Knoten-Manager fehlgeschlagen" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail-API hat Folgendes zurĂĽckgegeben: %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' fĂĽr %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in angegeben, aber kein Plug-in-Name fĂĽr %s angegeben." #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster fĂĽr %s ist inaktiviert!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Maximale Länge von local_queue fĂĽr Publisher ist ĂĽberschritten, die %d " "ältesten Beispiele werden gelöscht" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung " "setzen" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW-AdminOps-API hat Folgendes zurĂĽckgegeben: %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben" #, python-format msgid "The field 'fields' is required for %s" msgstr "Das Feld 'fields' ist erforderlich fĂĽr %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Auswertung nicht möglich fĂĽr Ausdruck %(expr)s: %(exc)s" msgid "Unable to send sample over UDP" msgstr "Beispiel kann nicht ĂĽber UDP gesendet werden" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr Loadbalancer %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr Firewall %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr Listener %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr Mitglied %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr Pool %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr VIP %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten fĂĽr VPN %(id)s; Beispiel wird " "ĂĽbersprungen" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s in VMware vSphere nicht gefunden" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s in XenServer nicht gefunden" msgid "Wrong sensor type" msgstr "Falscher Sensortyp" #, 
python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." #, python-format msgid "dropping out of time order sample: %s" msgstr "" "Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" msgid "ipmitool output length mismatch" msgstr "Abweichung bei ipmitool-Ausgabelänge" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " "Eingabe abgerufen" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" msgid "running ipmitool failure" msgstr "Fehler beim AusfĂĽhren von ipmitool" ceilometer-10.0.0/ceilometer/locale/ja/0000775000175100017510000000000013236733440017732 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ja/LC_MESSAGES/0000775000175100017510000000000013236733440021517 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po0000666000175100017510000001771013236733243024220 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata # 笹原 ćŚçľŽ , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-16 11:33+0000\n" "Last-Translator: 笹原 ćŚçľŽ \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Japanese\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "演算変換ă—ă­ă‚°ă©ă ăŻă€ĺĽŹ '%s' ă§ĺ°‘ăŞăŹă¨ă‚‚ 1 ă¤ă®ăˇăĽă‚żăĽă‚’使用ă™ă‚‹ĺż…č¦ăŚă‚り" "ăľă™" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI ă«ćŽĄç¶šă§ăŤăľă›ă‚“ă§ă—ăź: %s" #, fuzzy, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "通知 %(type)s を除去ă—ă¦ă„ăľă™ (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "イăłă‚ąă‚żăłă‚ą ă®ć¤śç´˘ä¸­ă« libvirt ă§ă‚¨ă©ăĽăŚç™şç”źă—ăľ" "ă—ăź: [エă©ăĽă‚łăĽă‰ %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "式㌠NaN 値ă«č©•価ă•れăľă—ăźă€‚" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "イăłă‚ąă‚żăłă‚ą ă®ă‡ăĽă‚żă‚’検査ă§ăŤăľă›ă‚“ă§ă—ăźă€‚ă‰ăˇ" "イăłçŠ¶ć…‹ăŻ SHUTOFF ă§ă™ă€‚" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d ă‡ăĽă‚żăťă‚¤ăłăă®ĺ…¬é–‹ă«ĺ¤±ć•—ă—ăľă—ăźă€‚ă“れらăŻĺ»ćŁ„ă•れăľă™" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d ă‡ăĽă‚żăťă‚¤ăłăă®ĺ…¬é–‹ă«ĺ¤±ć•—ă—ăľă—ăźă€‚ă“れらをキăĄăĽă«ĺ…Ąă‚Śă¦ăŹă ă•ă„" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
msgstr "" "%(line)s 行目㮠%(column)s ĺ—ă§ĺ®šçľ©ă•ァイ㫠%(file)s ă® YAML ć§‹ć–‡ ăŚç„ˇĺŠąă§" "ă™ă€‚" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特性 %(trait)s ă®ç‰ąć€§ă‚żă‚¤ă— '%(type)s' ăŚç„ˇĺŠąă§ă™" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "複数㮠VM %s ㌠XenServer ă«č¦‹ă¤ă‹ă‚Šăľă—ăź" msgid "Must specify connection_url, and connection_password to use" msgstr "" "connection_url ă¨ă€ä˝żç”¨ă™ă‚‹ connection_password を指定ă™ă‚‹ĺż…č¦ăŚă‚りăľă™" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)s ă«ä˝żç”¨ă§ăŤă‚‹ %(plugin)s ă¨ă„ă†ĺŤĺ‰Ťă®ă—ă©ă‚°ă‚¤ăłăŚă‚りăľă›ă‚“" msgid "Node Manager init failed" msgstr "ăŽăĽă‰ăžăŤăĽă‚¸ăŁăĽă®ĺťćśźĺŚ–ă«ĺ¤±ć•—ă—ăľă—ăź" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API ă‹ă‚‰ %(status)s %(reason)s ăŚčż”ă•れăľă—ăź" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "%(name)s ă«é–˘ă™ă‚‹ JSONPath ă®ćŚ‡ĺ®š '%(jsonpath)s' ă®ă‚¨ă©ăĽă‚’č§Łćžă—ăľă™: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "ă—ă©ă‚°ă‚¤ăłăŚćŚ‡ĺ®šă•れă¦ă„ăľă™ăŚă€%s ă«ă—ă©ă‚°ă‚¤ăłĺŤăŚćŹäľ›ă•れă¦ă„ăľă›ă‚“" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "ă‚»ăłă‚µăĽ %(mtr)s ă®ăťăĽăŞăłă‚°ăŚ %(cnt)s 回失敗ă—ăľă—ăź" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "ăťăĽăŞăłă‚° %(name)s ㌠%(cnt)s 回失敗ă—ăľă—ăź" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s ă® pollster ăŚç„ˇĺŠąă«ăŞăŁă¦ă„ăľă™" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "ă‘ă–ăŞăă‚·ăŁăĽ local_queue 最大長を超ăăľă—ăźă€‚古ă„ć–ąă‹ă‚‰ %d 個ă®ă‚µăłă—ă«ă‚’除" "去ă—ăľă™" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "ă‘ă–ăŞăă‚·ăĄăťăŞă‚·ăĽăŚä¸ŤćŽă§ă™ (%s)。強ĺ¶çš„ă«ă‡ă•ă‚©ă«ăă«č¨­ĺ®šă•れăľă™" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API ă‹ă‚‰ %(status)s %(reason)s ăŚčż”ă•れăľă—ăź" #, python-format msgid "Required field %s not specified" msgstr "ĺż…é ă•ィăĽă«ă‰ %s ăŚćŚ‡ĺ®šă•れă¦ă„ăľă›ă‚“" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s ă«ăŻă•ィăĽă«ă‰ 'fields' ăŚĺż…č¦ă§ă™" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "式 %(expr)s を評価ă§ăŤăľă›ă‚“: %(exc)s" msgid "Unable to send sample over UDP" msgstr "UDP 経由ă§ă‚µăłă—ă«ă‚’é€äżˇă§ăŤăľă›ă‚“" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "ă­ăĽă‰ăă©ăłă‚µăĽ %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’ス" "ă‚­ăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "ă•ァイアウォăĽă« %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’ス" "ă‚­ăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "ăŞă‚ąăŠăĽ %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľ" "ă™" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "ăˇăłă㼠%(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľ" "ă™" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "ă—ăĽă« %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "ä»®ćł IP %(id)s 
ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "vpn %(id)s ă§ä¸ŤćŽăŞçжㅋ %(stat)s を受信ă—ăľă—ăźă€‚サăłă—ă«ă‚’スキăă—ă—ăľă™" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VMware vSphere ă§ VM %s ăŚč¦‹ă¤ă‹ă‚Šăľă›ă‚“" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s ㌠XenServer ă«č¦‹ă¤ă‹ă‚Šăľă›ă‚“" msgid "Wrong sensor type" msgstr "ă‚»ăłă‚µăĽç¨®ĺĄăŚć­Łă—ăŹă‚りăľă›ă‚“" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "定義ă•ァイ㫠%(file)s ă§ă®čŞ­ăżĺŹ–ă‚Šă® YAML エă©ăĽ" #, python-format msgid "dropping out of time order sample: %s" msgstr "ćśźé™ĺ‡ă‚Śă®ă‚ŞăĽă€ăĽă‚µăłă—ă«ă‚’ĺ»ćŁ„ă—ă¦ă„ăľă™: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "サăłă—ă« (ĺ…行ăŞă—) ă‚’ĺ»ćŁ„ă—ă¦ă„ăľă™: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool 出力ă®é•·ă•ăŚä¸€č‡´ă—ăľă›ă‚“" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI ă‚»ăłă‚µăĽă‡ăĽă‚żă®č§Łćžă«ĺ¤±ć•—ă—ăľă—ăźă€‚指定ă•れăźĺ…ĄĺŠ›ă‹ă‚‰ă‡ăĽă‚żăŚĺŹ–ĺľ—ă•れăľ" "ă›ă‚“ă§ă—ăź" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI ă‚»ăłă‚µăĽă‡ăĽă‚żă®č§Łćžă«ĺ¤±ć•—ă—ăľă—ăźă€‚不ćŽăŞă‚»ăłă‚µăĽç¨®ĺĄă§ă™ă€‚" msgid "running ipmitool failure" msgstr "ipmitool ă®ĺ®źčˇŚă«ĺ¤±ć•—ă—ăľă—ăź" ceilometer-10.0.0/ceilometer/locale/ko_KR/0000775000175100017510000000000013236733440020345 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/0000775000175100017510000000000013236733440022132 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po0000666000175100017510000001672213236733243024635 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Seong-ho Cho , 2014 # Seunghyo Chun , 2013 # Seunghyo Chun , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "'%s' 표í„식ě—서 ě‚°ě  ëł€í™ę¸°ëŠ” í•ë‚ ěť´ěěť ëŻ¸í„°ëĄĽ 사용해야 함" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "XenAPI를 연결할 ě 없음: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "알림 %(type)s ě‚­ě ś 중(uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "인스턴스 ę˛€ě‰ ě¤‘ libvirtě—서 ě¤ëĄ ë°śěť: [ě¤ëĄ ě˝”" "드 %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "표í„식이 NaN 값으로 평가ëě—습ë‹ë‹¤!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "인스턴스 <이름=%(name)s, id=%(id)s>ěť ëŤ°ěť´í„° 검사 실패, 도메인 ěíśę°€ SHUTOFF" "ěž…ë‹ë‹¤." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d 데이터포인트 공개 실패. 
이를 ě‚­ě śí•는 중" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d 데이터포인트 공개 실패. 이를 íě— ëŚ€ę¸°ě‹śí‚´" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "다음ě—서 ě •ěť íŚŚěťĽ %(file)sěť ě¬ë°”르지 않은 YAML 구문: í–‰: %(line)s, ě—´: " "%(column)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "특성 %(trait)sě— ëŚ€í•ś ě¬ë°”르지 않은 특성 ěś í• '%(type)s'" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "여러 VM %sěť„(를) XenServerě—서 찾음 " msgid "Must specify connection_url, and connection_password to use" msgstr "사용할 connection_url ë°Ź connection_password를 지정해야 함 " #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)sě— ëŚ€í•´ %(plugin)s(ěť´)라는 플러그인을 사용할 ě 없음" msgid "Node Manager init failed" msgstr "노드 ę´€ë¦¬ěž ě´ę¸°í™” 실패" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail APIę°€ %(status)s 리턴: %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" " %(name)sě— ëŚ€í•ś JSONPath 스펙 '%(jsonpath)s'ěť ęµ¬ë¬¸ 분석 ě¤ëĄ: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "플러그인이 지정ëě§€ 않ě•지만, %sě— í”Śëź¬ę·¸ěť¸ 이름이 ě śęłµëě§€ 않음" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "íŹ´ë§ %(mtr)s 센서가 %(cnt)së˛ ě‹¤íŚ¨í–습ë‹ë‹¤!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "íŹ´ë§ %(name)sěť´(ę°€) %(cnt)së˛ ě‹¤íŚ¨í–습ë‹ë‹¤!" #, python-format msgid "Pollster for %s is disabled!" msgstr "%sěť ěťę˛¬ěˇ°ě‚¬ěžę°€ 사용 ě•함으로 설정ëě–´ ěžěеë‹ë‹¤!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "ęłµę°śěž ěµśëŚ€ local_queue 길이가 ě´ęłĽë¨. %d 가장 ě¤ëžëś ě플 ě‚­ě ś 중" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "공개 ě •ě±…ěť„ 알 ě 없음(%s). 기본값으로 ę°•ě ś 설정함" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps APIę°€ %(status)s %(reason)sěť„(를) 리턴함" #, python-format msgid "Required field %s not specified" msgstr "í•„ě 필드 %sěť´(ę°€) 지정ëě§€ 않음" #, python-format msgid "The field 'fields' is required for %s" msgstr "%sě— 'fields' 필드 í•„ěš”" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "%(expr)s 표í„식을 평가할 ě 없음: %(exc)s" msgid "Unable to send sample over UDP" msgstr "UDP를 통해 ě플을 전송할 ě 없음" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "로드 밸런서 %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플 ę±´ë„뛰기" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "fw %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "ë¦¬ěŠ¤ë„ %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플 ę±´ë„뛰기" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "멤버 %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "í’€ %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "vip %(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. 
ě플을 ę±´ë„뛰는 중" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "vpn%(id)sě—서 알 ě 없는 ěíś %(stat)sěť´(ę°€) ěě‹ ë¨. ě플 ę±´ë„뛰기" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %sěť„(를) VMware vSphereě—서 ě°ľěť„ ě 없음" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %sěť„(를) XenServerě—서 ě°ľěť„ ě 없음 " msgid "Wrong sensor type" msgstr "ěžëŞ»ëś ě„Ľě„ś ěś í•" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "ě •ěť íŚŚěťĽ %(file)sěť„(를) 읽는 ě¤‘ě— YAML ě¤ëĄ ë°śěť" #, python-format msgid "dropping out of time order sample: %s" msgstr "시간 ěśě„ś ě플ě—서 벗어남: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "ě„ í–‰ 작업이 없는 ě플 ě‚­ě ś: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool ě¶śë Ą 길이 ë¶ěťĽěą" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI 센서 데이터 구문 ë¶„ě„ťě— ě‹¤íŚ¨í–음, ě śęłµëś ěž…ë Ąě—서 검ě‰ëś 데이터가 없음" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI 센서 데이터 구문 ë¶„ě„ťě— ě‹¤íŚ¨í–음, 알 ě 없는 센서 ěś í•" msgid "running ipmitool failure" msgstr "ipmitool 실행 실패" ceilometer-10.0.0/ceilometer/locale/ru/0000775000175100017510000000000013236733440017766 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ru/LC_MESSAGES/0000775000175100017510000000000013236733440021553 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po0000666000175100017510000002125313236733243024251 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Russian\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "ĐрифметичеŃкий преобразователь должен иŃпользовать хотя бы один Ńчетчик в " "выражении %s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Не ŃдалоŃŃŚ подключитьŃŃŹ Đş XenAPI: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Удаление Ńведомления %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Возникла ĐľŃибка в libvirt при поиŃке экземпляра <имя=%(name)s, ĐĐ”=%(id)s>: " "[Код ĐľŃибки: %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "РезŃльтат вычиŃления выражения - значение NaN!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Не ŃдалоŃŃŚ проверить данные экземпляра <имя=%(name)s, ĐĐ”=%(id)s>, ŃĐľŃтояние " "домена - SHUTOFF." 
#, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Не ŃдалоŃŃŚ опŃбликовать %d точек данных, выполняетŃŃŹ их Ńдаление" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Не ŃдалоŃŃŚ опŃбликовать %d точек данных, Ńоздайте для них очередь" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "НедопŃŃтимый ŃинтакŃĐ¸Ń YAML в файле определений %(file)s; Ńтрока: %(line)s, " "Ńтолбец: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "НедопŃŃтимый тип ĐľŃобенноŃти %(type)s для ĐľŃобенноŃти %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Найдено неŃколько виртŃальных маŃин %s в XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "Необходимо Ńказать connection_url и connection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Нет Đ´ĐľŃŃ‚Ńпного модŃля %(plugin)s для %(name)s" msgid "Node Manager init failed" msgstr "Сбой инициализации админиŃтратора Ńзлов" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "API Opencontrail возвратил %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "ĐžŃибка анализа Ńпецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Указан модŃль, но не передано имя модŃля для %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "%(cnt)s-кратный Ńбой датчика опроŃа %(mtr)s!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "ĐžĐżŃ€ĐľŃ %(name)s не ŃдалоŃŃŚ выполнить %(cnt)s раз." #, python-format msgid "Pollster for %s is disabled!" msgstr "ОпраŃивающий объект для %s выключен!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "ПревыŃена макŃимальная длина local_queue ĐżŃбликатора, Ńдаление %d Ńамых " "Ńтарых образцов" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Стратегия ĐżŃбликации неизвеŃтна (%s). 
По Ńмолчанию принŃдительная" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "ФŃнкция API RGW AdminOps вернŃла %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Не Ńказано обязательное поле %s" #, python-format msgid "The field 'fields' is required for %s" msgstr "Поле 'fields' являетŃŃŹ обязательным для %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "ВычиŃлить выражение %(expr)s невозможно: %(exc)s" msgid "Unable to send sample over UDP" msgstr "Не ŃдалоŃŃŚ отправить образец по UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Đ’ баланŃировщике нагрŃзки %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, " "пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Đ’ fw %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Đ’ обработчике %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример " "пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Đ’ ŃчаŃтнике %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример " "пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Đ’ ĐżŃле %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Đ’ vip %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s,пример пропŃŃкаетŃŃŹ" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Đ’ VPN %(id)s полŃчено неизвеŃтное ŃĐľŃтояние %(stat)s, пример пропŃŃкаетŃŃŹ" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "ВиртŃальная маŃина %s не найдена в VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Не найдена виртŃальная маŃина %s в XenServer" msgid "Wrong sensor type" msgstr "Неверный тип датчика" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "ĐžŃибка YAML при чтении файла определений %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "Ńдаление образца, выпадающего из хронологичеŃкого порядка: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "Ńдаление образца без предŃеŃтвенника: %s" msgid "ipmitool output length mismatch" msgstr "неŃоответŃтвие длины вывода ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Ńбой анализа данных датчика IPMI, не полŃчены данные из переданного ввода" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Ńбой анализа данных датчика IPMI, неизвеŃтный тип датчика" msgid "running ipmitool failure" msgstr "Ńбой выполнения ipmitool" ceilometer-10.0.0/ceilometer/locale/zh_TW/0000775000175100017510000000000013236733440020373 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/0000775000175100017510000000000013236733440022160 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po0000666000175100017510000001531513236733243024660 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. 
# # Translators: # Stefano Maffulli , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "在表示式 '%s' 中,算術轉換器必é č‡łĺ°‘使用一種č¨é‡Ź" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "無法連接 XenAPI:%s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在捨棄通知 %(type)sďĽUUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查閱實例 <ĺŤç¨±=%(name)s,ID=%(id)s> 時,libvirt 中發生錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "表示式已求值為非數字值ďĽ" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "無法檢查實例 <ĺŤç¨±=%(name)s,ID=%(id)s> 的資料,網域狀態為 SHUTOFF。" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "ç„ˇćł•ç™Ľä˝ %d 個資料點,正在捨棄ĺ®ĺ€‘" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "ç„ˇćł•ç™Ľä˝ %d 個資料點,正在將ĺ®ĺ€‘排入佇ĺ—" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s ĺ—中的 YAML 語法無ć•。" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特徵 %(trait)s 的特徵類型 '%(type)s' 無ć•" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "在 XenServer 中找ĺ°ĺ¤šĺ€‹ VM %s" msgid "Must specify connection_url, and connection_password to use" msgstr "ĺż…é ćŚ‡ĺ®š connection_url ĺ’Ś connection_password,才č˝ä˝żç”¨" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "沒有ĺŤç‚ş %(plugin)s 的外掛程式可供 %(name)s 使用" msgid "Node Manager init failed" msgstr "節點管ç†ç¨‹ĺĽŹčµ·ĺ§‹č¨­ĺ®šĺ¤±ć•—" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API 傳回了 %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "%(name)s çš„ JSONPath 規格 '%(jsonpath)s' 中發生剖ćžéŚŻčŞ¤ďĽš%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "ĺ·˛ćŚ‡ĺ®šĺ¤–ćŽ›ç¨‹ĺĽŹďĽŚä˝†ĺŤ»ćśŞĺ‘ %s ćŹäľ›ĺ¤–掛程式ĺŤç¨±" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次ďĽ" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "輪詢 %(name)s 失敗了 %(cnt)s 次ďĽ" #, python-format msgid "Pollster for %s is disabled!" 
msgstr "ĺ·˛ĺśç”¨ %s çš„ PollsterďĽ" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "已超出發ä˝č€… local_queue 長度上é™ďĽŚć­Łĺś¨ćŤ¨ćŁ„ %d 個最čŠçš„樣本" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "發ä˝ĺŽźĺ‰‡ä¸ŤćŽ (%s),強ĺ¶č¨­ç‚şé č¨­ĺ€Ľ" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "未指定必č¦ć¬„位 %s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需č¦ć¬„位「欄位」" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "無法對表示式 %(expr)s 進行求值:%(exc)s" msgid "Unable to send sample over UDP" msgstr "ç„ˇćł•é€ŹéŽ UDP 來傳é€ć¨Łćś¬" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在負載平衡器 %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "在é˛ç«ç‰† %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在接č˝ĺ™¨ %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在ć員 %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "在儲ĺ­ĺŤ€ %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "在 VIP %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收ĺ°ä¸ŤćŽç‹€ć…‹ %(stat)s,正在跳éŽçŻ„äľ‹" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "在 VMware vSphere ä¸­ć‰ľä¸Ťĺ° VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "在 XenServer ä¸­ć‰ľä¸Ťĺ° VM %s" msgid "Wrong sensor type" msgstr "感應器類型錯誤" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "讀取定義檔 %(file)s 時發生 YAML 錯誤" #, python-format msgid "dropping out of time order sample: %s" msgstr "正在ĺŞé™¤ä¸Ťĺś¨ć™‚間順序內的範例:%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "正在捨棄不ĺ«ĺ‰Ťä¸€ç‰ćś¬çš„樣本:%s" msgid "ipmitool output length mismatch" msgstr "ipmitool 輸出長度不符" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "ĺ‰–ćž IPMI 感應器資料失敗,未從給定的輸入擷取任何資料" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "ĺ‰–ćž IPMI 感應器資料失敗,感應器類型不ćŽ" msgid "running ipmitool failure" msgstr "執行 ipmitool 失敗" ceilometer-10.0.0/ceilometer/locale/en_GB/0000775000175100017510000000000013236733440020312 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013236733440022077 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po0000666000175100017510000001742413236733243024602 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Andi Chandler , 2013-2014 # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev177\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-29 00:03+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-11-30 10:30+0000\n" "Last-Translator: Andi Chandler \n" "Language: en-GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: English (United Kingdom)\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "Arithmetic transformer must use at least one meter in expression '%s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "Could not connect to XenAPI: %s" #, python-format msgid "Could not get VM %s CPU number" msgstr "Could not get VM %s CPU number" #, python-format msgid "Could not load the following pipelines: %s" msgstr "Could not load the following pipelines: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Dropping Notification %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "Expression evaluated to a NaN value!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Failed to inspect data of instance , domain state " "is SHUTOFF." #, python-format msgid "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" msgstr "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Failed to publish %d datapoints, dropping them" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Failed to publish %d datapoints, queue them" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Invalid trait type '%(type)s' for trait %(trait)s" #, python-format msgid "Invalid type %s specified" msgstr "Invalid type %s specified" #, python-format msgid "Missing field %s" msgstr "Missing field %s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Multiple VM %s found in XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "Must specify connection_url, and connection_password to use" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "No plugin named %(plugin)s available for %(name)s" msgid "Node Manager init failed" msgstr "Node Manager init failed" #, python-format msgid "OpenDaylight API returned %(status)s %(reason)s" msgstr "OpenDaylight API returned %(status)s %(reason)s" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail API returned %(status)s %(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plugin specified, but no plugin name supplied for %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling %(mtr)s sensor failed for %(cnt)s times!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling %(name)s failed for %(cnt)s times!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster for %s is disabled!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Publishing policy is unknown (%s) force to default" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API returned %(status)s %(reason)s" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Required field %(field)s should be a %(type)s" #, python-format msgid "Required field %s not specified" msgstr "Required field %s not specified" #, python-format msgid "Required fields %s not specified" msgstr "Required fields %s not specified" #, python-format msgid "The field 'fields' is required for %s" msgstr "The field 'fields' is required for %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "Unable to evaluate expression %(expr)s: %(exc)s" msgid "Unable to send sample over UDP" msgstr "Unable to send sample over UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Unknown status %(stat)s received on fw %(id)s,skipping sample" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "Unknown status %(stat)s received on listener %(id)s, skipping sample" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "Unknown status %(stat)s received on member %(id)s, skipping sample" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "Unknown 
status %(stat)s received on pool %(id)s, skipping sample" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "Unknown status %(stat)s received on vip %(id)s, skipping sample" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "Unknown status %(stat)s received on vpn %(id)s, skipping sample" #, python-format msgid "Unrecognized type value %s" msgstr "Unrecognised type value %s" #, python-format msgid "VM %s is poweredOff in VMware vSphere" msgstr "VM %s is poweredOff in VMware vSphere" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s not found in VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "VM %s not found in XenServer" msgid "Wrong sensor type" msgstr "Wrong sensor type" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML error reading Definitions file %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "dropping out of time order sample: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "dropping sample with no predecessor: %s" msgid "ipmitool output length mismatch" msgstr "ipmitool output length mismatch" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "parse IPMI sensor data failed,No data retrieved from given input" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "parse IPMI sensor data failed,unknown sensor type" msgid "running ipmitool failure" msgstr "running ipmitool failure" ceilometer-10.0.0/ceilometer/locale/zh_CN/0000775000175100017510000000000013236733440020341 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/0000775000175100017510000000000013236733440022126 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po0000666000175100017510000001565613236733243024636 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # yelu , 2013 # Tom Fifield , 2013 # 颜海峰 , 2014 # yelu , 2013 # Yu Zhang, 2013 # Yu Zhang, 2013 # 颜海峰 , 2014 # English translations for ceilometer. # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh-CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 3.9.6\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "算术ĺŹĺ˝˘ĺ™¨ĺś¨čˇ¨čľľĺĽŹ'%s'中必须至少使用一个指标" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "无法连接ĺ°XenAPI:%s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在丢ĺĽé€šçźĄ%(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查找实例 <ĺŤç§°ä¸ş %(name)s,标识为 %(id)s> 时,libvirt 中出错:[é”™čŻŻä»Łç  " "%(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" 
msgstr "表达式计算结果为NaNďĽ" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "为虚拟机获取监控数据失败了,虚拟机状ć€ä¸şSHUTOFF" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "发ĺ¸%d个数据点时失败,正在将其丢ĺĽ" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "发ĺ¸%d个数据点时失败,将其入éź" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定义文件%(file)s中有非法YAML语法,行:%(line)s,ĺ—%(column)s。" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特ĺľ%(trait)s包ĺ«äş†ä¸Ťĺ法的特ĺľç±»ĺž‹'%(type)s' " #, python-format msgid "Multiple VM %s found in XenServer" msgstr "多个虚拟机%s在XenServer中被找ĺ°" msgid "Must specify connection_url, and connection_password to use" msgstr "使用时必须指定connection_urlĺ’Śconnection_password" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "未对 %(name)s ćŹäľ›ĺŤä¸ş %(plugin)s 的插件" msgid "Node Manager init failed" msgstr "节点管ç†ĺ™¨ĺťĺ§‹ĺŚ–ĺ¤±č´Ą" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "Opencontrail接口返回状ć€%(status)s,原因%(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "对 %(name)s 指定的 JSONPathďĽĺŤłâ€ś%(jsonpath)s”)ĺ­ĺś¨č§Łćžé”™čŻŻďĽš%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "指定了插件,但未对 %s ćŹäľ›ćŹ’ä»¶ĺŤ" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "拉取%(mtr)s传感器失败了%(cnt)s次ďĽ" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "轮询 %(name)s 已失败 %(cnt)s 次ďĽ" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s的采集器被ç¦ç”¨" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "发ĺ¸çš„数据量超过本地éźĺ—最大长度,正在丢ĺĽćś€č€çš„%d个数据" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "未知的发ĺ¸ç­–略(%s),强ĺ¶ä˝żç”¨é»č®¤ç­–略" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps接口返回%(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "必填项%s没有填写" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需č¦ĺ­—段“fields”" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "无法计算表达式%(expr)s:%(exc)s" msgid "Unable to send sample over UDP" msgstr "无法通过UDP发é€é‡‡ć ·" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "在负载均衡器 %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "从fw %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "在侦ĺ¬ĺ™¨ %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "在ćĺ‘ %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "从pool %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "从vip %(id)sć”¶ĺ°ćśŞçźĄçš„状ć€%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收ĺ°ćśŞçźĄçж〠%(stat)s,正在跳过样本" #, python-format msgid "VM %s not 
found in VMware vSphere" msgstr "在 VMware vSphere ä¸­ďĽŚć‰ľä¸Ťĺ° VM %s" #, python-format msgid "VM %s not found in XenServer" msgstr "无法在XenServer中找ĺ°č™šć‹źćśş%s" msgid "Wrong sensor type" msgstr "错误的传感器类型" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "读取定义文件%(file)sć—¶é‡ĺ°YAML错误" #, python-format msgid "dropping out of time order sample: %s" msgstr "正在退出时间顺序样本:%s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "因为之前没有数据ďĽç”¨ćťĄč®ˇç®—差值)因而丢ĺĽć•°ćŤ®ďĽš%s" msgid "ipmitool output length mismatch" msgstr "ipmi输出长度不匹配" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "č§ŁćžIPMI传感器数据失败,从给定的输入中无法检索ĺ°ć•°ćŤ®" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "č§ŁćžIPMI传感器数据失败,未知的传感器类型" msgid "running ipmitool failure" msgstr "čżčˇŚipmitool时失败了" ceilometer-10.0.0/ceilometer/locale/pt_BR/0000775000175100017510000000000013236733440020346 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/0000775000175100017510000000000013236733440022133 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po0000666000175100017510000001612213236733243024630 0ustar zuulzuul00000000000000# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer 9.0.1.dev161\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "Arithmetic transformer must use at least one meter in expression '%s'" msgstr "" "O transformador aritmĂ©tico deve usar pelo menos um medidor na expressĂŁo '%s'" #, python-format msgid "Could not connect to XenAPI: %s" msgstr "NĂŁo foi possĂ­vel conectar-se ao XenAPI: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao consultar instância : [CĂłdigo " "de Erro %(error_code)s] %(ex)s" msgid "Expression evaluated to a NaN value!" msgstr "ExpressĂŁo avaliada para um valor NaN!" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Falha ao inspecionar os dados da instância , " "estado do domĂ­nio Ă© SHUTOFF." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Falha ao publicar %d pontos de dados, descartando-os" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Falha ao publicar %d pontos de dados, enfileire-os" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxe YAML inválida no arquivo de definições %(file)s na linha: %(line)s, " "coluna: %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" #, python-format msgid "Multiple VM %s found in XenServer" msgstr "Várias máquinas virtuais %s localizadas no XenServer" msgid "Must specify connection_url, and connection_password to use" msgstr "connection_url e connection_password devem ser especificados para uso" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nenhum plug-in nomeado %(plugin)s disponĂ­vel para %(name)s" msgid "Node Manager init failed" msgstr "Inicialização do gerenciador de nĂł com falha" #, python-format msgid "Opencontrail API returned %(status)s %(reason)s" msgstr "A API Opencontrail retornou%(status)s%(reason)s" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "O sensor de pesquisa %(mtr)s falhou para %(cnt)s vezes!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" #, python-format msgid "Pollster for %s is disabled!" msgstr "O pesquisador para %s está desativado!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Comprimento máximo de local_queue do publicador foi excedido, descartando %d " "amostras antigas" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Publicando polĂ­tica desconhecida (%s) força para o padrĂŁo" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo obrigatĂłrio %s nĂŁo especificado" #, python-format msgid "The field 'fields' is required for %s" msgstr "O campo 'fields' Ă© necessário para %s" #, python-format msgid "Unable to evaluate expression %(expr)s: %(exc)s" msgstr "NĂŁo Ă© possĂ­vel avaliar expressĂŁo %(expr)s:%(exc)s" msgid "Unable to send sample over UDP" msgstr "NĂŁo Ă© possĂ­vel enviar amostra sobre UDP" #, python-format msgid "" "Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no Balanceador de Carga %(id)s, " "ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Status desconhecido %(stat)s recebido na largura da fonte %(id)s, ignorando " "a amostra" #, python-format msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no listener %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no membro %(id)s, ignorando a amostra" #, python-format msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido no conjunto %(id)s, ignorando amostras" #, python-format msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido em vip %(id)s, ignorando a amostra" #, python-format msgid 
"Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido recebido no vpn %(id)s, ignorando a " "amostra" #, python-format msgid "VM %s not found in VMware vSphere" msgstr "VM %s nĂŁo localizado no VMware vSphere" #, python-format msgid "VM %s not found in XenServer" msgstr "Máquina virtual %s nĂŁo localizada no XenServer" msgid "Wrong sensor type" msgstr "Tipo de sensor errado" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erro YAML ao ler o arquivo de definições %(file)s" #, python-format msgid "dropping out of time order sample: %s" msgstr "eliminando amostra fora de ordem de tempo: %s" #, python-format msgid "dropping sample with no predecessor: %s" msgstr "descartando amostra sem predecessor: %s" msgid "ipmitool output length mismatch" msgstr "incompatibilidade no comprimento da saĂ­da de ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "análise dos dados do sensor IPMI com falha, nenhum dado recuperado da " "entrada fornecida" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "análise dos dados do sensor IPMI com falha,tipo de sensor desconhecido" msgid "running ipmitool failure" msgstr "executando falha de ipmitool" ceilometer-10.0.0/ceilometer/image/0000775000175100017510000000000013236733440017163 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/image/discovery.py0000666000175100017510000000254313236733243021553 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glanceclient from oslo_config import cfg from ceilometer import keystone_client from ceilometer.polling import plugin_base SERVICE_OPTS = [ cfg.StrOpt('glance', default='image', help='Glance service type.'), ] class ImagesDiscovery(plugin_base.DiscoveryBase): def __init__(self, conf): super(ImagesDiscovery, self).__init__(conf) creds = conf.service_credentials self.glance_client = glanceclient.Client( version='2', session=keystone_client.get_session(conf), region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.glance) def discover(self, manager, param=None): """Discover resources to monitor.""" return self.glance_client.images.list() ceilometer-10.0.0/ceilometer/image/__init__.py0000666000175100017510000000000013236733243021265 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/image/glance.py0000666000175100017510000000367713236733243021006 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with images """ from __future__ import absolute_import from ceilometer.polling import plugin_base from ceilometer import sample class _Base(plugin_base.PollsterBase): @property def default_discovery(self): return 'images' @staticmethod def extract_image_metadata(image): return dict((k, getattr(image, k)) for k in [ "status", "visibility", "name", "container_format", "created_at", "disk_format", "updated_at", "min_disk", "protected", "checksum", "min_ram", "tags", "virtual_size"]) class ImageSizePollster(_Base): def get_samples(self, manager, cache, resources): for image in resources: yield sample.Sample( name='image.size', type=sample.TYPE_GAUGE, unit='B', volume=image.size, user_id=None, project_id=image.owner, resource_id=image.id, resource_metadata=self.extract_image_metadata(image), ) ceilometer-10.0.0/ceilometer/objectstore/0000775000175100017510000000000013236733440020424 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/objectstore/rgw.py0000666000175100017510000001672513236733243021613 0ustar zuulzuul00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with ceph object stores """ from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log import six.moves.urllib.parse as urlparse from ceilometer import keystone_client from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('radosgw', help='Radosgw service type.'), ] CREDENTIAL_OPTS = [ cfg.StrOpt('access_key', secret=True, help='Access key for Radosgw Admin.'), cfg.StrOpt('secret_key', secret=True, help='Secret key for Radosgw Admin.') ] class _Base(plugin_base.PollsterBase): METHOD = 'bucket' _ENDPOINT = None def __init__(self, conf): super(_Base, self).__init__(conf) self.access_key = self.conf.rgw_admin_credentials.access_key self.secret = self.conf.rgw_admin_credentials.secret_key @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'rgw.get_%s' % self.METHOD @staticmethod def _get_endpoint(conf, ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once, also we assume that in a single deployment # we may be only deploying `radosgw` or `swift` as the object-store if _Base._ENDPOINT is None and conf.service_types.radosgw: try: creds = conf.service_credentials rgw_url = keystone_client.get_service_catalog( ksclient).url_for( service_type=conf.service_types.radosgw, interface=creds.interface, region_name=creds.region_name) _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') except exceptions.EndpointNotFound: LOG.debug("Radosgw endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, 
tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: raise StopIteration() try: from ceilometer.objectstore import rgw_client as c_rgw_client rgw_client = c_rgw_client.RGWAdminClient(endpoint, self.access_key, self.secret) except ImportError: raise plugin_base.PollsterPermanentError(tenants) for t in tenants: api_method = 'get_%s' % self.METHOD yield t.id, getattr(rgw_client, api_method)(t.id) class ContainersObjectsPollster(_Base): """Get info about object counts in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects', type=sample.TYPE_GAUGE, volume=int(it.num_objects), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about object sizes in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(it.size * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.size', type=sample.TYPE_GAUGE, volume=int(bucket_info['size'] * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_objects']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsContainersPollster(_Base): def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.containers', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_buckets']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class UsagePollster(_Base): METHOD = 'usage' def get_samples(self, manager, cache, resources): for tenant, usage in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.api.request', type=sample.TYPE_GAUGE, volume=int(usage), unit='request', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) ceilometer-10.0.0/ceilometer/objectstore/__init__.py0000666000175100017510000000000013236733243022526 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/objectstore/rgw_client.py0000666000175100017510000000504113236733243023136 0ustar zuulzuul00000000000000# # Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple from awsauth import S3Auth import requests import six.moves.urllib.parse as urlparse from ceilometer.i18n import _ class RGWAdminAPIFailed(Exception): pass class RGWAdminClient(object): Bucket = namedtuple('Bucket', 'name, num_objects, size') def __init__(self, endpoint, access_key, secret_key): self.access_key = access_key self.secret = secret_key self.endpoint = endpoint self.hostname = urlparse.urlparse(endpoint).netloc def _make_request(self, path, req_params): uri = "{0}/{1}".format(self.endpoint, path) r = requests.get(uri, params=req_params, auth=S3Auth(self.access_key, self.secret, self.hostname) ) if r.status_code != 200: raise RGWAdminAPIFailed( _('RGW AdminOps API returned %(status)s %(reason)s') % {'status': r.status_code, 'reason': r.reason}) return r.json() def get_bucket(self, tenant_id): path = "bucket" req_params = {"uid": tenant_id, "stats": "true"} json_data = self._make_request(path, req_params) stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} stats['num_buckets'] = len(json_data) for it in json_data: for v in it["usage"].values(): stats['num_objects'] += v["num_objects"] stats['size'] += v["size_kb"] stats['buckets'].append(self.Bucket(it["bucket"], v["num_objects"], v["size_kb"])) return stats def get_usage(self, tenant_id): path = "usage" req_params = {"uid": tenant_id} json_data = self._make_request(path, req_params) usage_data = json_data["summary"] return sum((it["total"]["ops"] for it in usage_data)) ceilometer-10.0.0/ceilometer/objectstore/swift.py0000666000175100017510000001625013236733243022141 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with object stores """ from __future__ import absolute_import from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log import six.moves.urllib.parse as urlparse from swiftclient import client as swift from swiftclient.exceptions import ClientException from ceilometer import keystone_client from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('reseller_prefix', default='AUTH_', help="Swift reseller prefix. 
Must be on par with " "reseller_prefix in proxy-server.conf."), ] SERVICE_OPTS = [ cfg.StrOpt('swift', default='object-store', help='Swift service type.'), ] class _Base(plugin_base.PollsterBase): METHOD = 'head' _ENDPOINT = None @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'swift.%s_account' % self.METHOD @staticmethod def _get_endpoint(conf, ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once if _Base._ENDPOINT is None: try: creds = conf.service_credentials _Base._ENDPOINT = keystone_client.get_service_catalog( ksclient).url_for( service_type=conf.service_types.swift, interface=creds.interface, region_name=creds.region_name) except exceptions.EndpointNotFound as e: LOG.info("Swift endpoint not found: %s", e) return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: raise StopIteration() swift_api_method = getattr(swift, '%s_account' % self.METHOD) for t in tenants: try: yield (t.id, swift_api_method( self._neaten_url(endpoint, t.id, self.conf.reseller_prefix), keystone_client.get_auth_token(ksclient))) except ClientException as e: if e.http_status == 404: LOG.warning("Swift tenant id %s not found.", t.id) else: raise e @staticmethod def _neaten_url(endpoint, tenant_id, reseller_prefix): """Transform the registered url to standard and valid format.""" return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', 'v1/' + reseller_prefix + tenant_id) class ObjectsPollster(_Base): """Collect the total objects count for each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects', type=sample.TYPE_GAUGE, volume=int(account['x-account-object-count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsSizePollster(_Base): """Collect the total objects size of each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.size', type=sample.TYPE_GAUGE, volume=int(account['x-account-bytes-used']), unit='B', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsContainersPollster(_Base): """Collect the container count for each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.containers', type=sample.TYPE_GAUGE, volume=int(account['x-account-container-count']), unit='container', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ContainersObjectsPollster(_Base): """Collect the objects count per container for each project""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects', type=sample.TYPE_GAUGE, 
volume=int(container['count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], resource_metadata=None, ) class ContainersSizePollster(_Base): """Collect the total objects size per container for each project""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(container['bytes']), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], resource_metadata=None, ) ceilometer-10.0.0/ceilometer/compute/0000775000175100017510000000000013236733440017555 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/pollsters/0000775000175100017510000000000013236733440021604 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/pollsters/util.py0000666000175100017510000000670413236733243023145 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import sample INSTANCE_PROPERTIES = [ # Identity properties 'reservation_id', # Type properties 'architecture', 'OS-EXT-AZ:availability_zone', 'kernel_id', 'os_type', 'ramdisk_id', ] def _get_metadata_from_object(conf, instance): """Return a metadata dictionary for the instance.""" instance_type = instance.flavor['name'] if instance.flavor else None metadata = { 'display_name': instance.name, 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''), 'instance_id': instance.id, 'instance_type': instance_type, 'host': instance.hostId, 'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''), 'flavor': instance.flavor, 'status': instance.status.lower(), 'state': getattr(instance, 'OS-EXT-STS:vm_state', u''), 'task_state': getattr(instance, 'OS-EXT-STS:task_state', u''), } # Image properties if instance.image: metadata['image'] = instance.image metadata['image_ref'] = instance.image['id'] # Images that come through the conductor API in the nova notifier # plugin will not have links. 
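# NOTE: illustrative sketch, not taken from the original module -- the instance.image value handled below is assumed to look roughly like {'id': '<image-uuid>', 'links': [{'rel': 'bookmark', 'href': 'http://.../images/<image-uuid>'}]} when it comes from the Nova API, or just {'id': '<image-uuid>'} on the notifier path, in which case image_ref_url is left as None.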
if instance.image.get('links'): metadata['image_ref_url'] = instance.image['links'][0]['href'] else: metadata['image_ref_url'] = None else: metadata['image'] = None metadata['image_ref'] = None metadata['image_ref_url'] = None for name in INSTANCE_PROPERTIES: if hasattr(instance, name): metadata[name] = getattr(instance, name) metadata['vcpus'] = instance.flavor['vcpus'] metadata['memory_mb'] = instance.flavor['ram'] metadata['disk_gb'] = instance.flavor['disk'] metadata['ephemeral_gb'] = instance.flavor['ephemeral'] metadata['root_gb'] = (int(metadata['disk_gb']) - int(metadata['ephemeral_gb'])) return sample.add_reserved_user_metadata(conf, instance.metadata, metadata) def make_sample_from_instance(conf, instance, name, type, unit, volume, resource_id=None, additional_metadata=None, monotonic_time=None): additional_metadata = additional_metadata or {} resource_metadata = _get_metadata_from_object(conf, instance) resource_metadata.update(additional_metadata) return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=instance.user_id, project_id=instance.tenant_id, resource_id=resource_id or instance.id, resource_metadata=resource_metadata, monotonic_time=monotonic_time, ) def instance_name(instance): """Shortcut to get instance name.""" return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None) ceilometer-10.0.0/ceilometer/compute/pollsters/__init__.py0000666000175100017510000001526613236733243023726 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import monotonic from oslo_log import log from oslo_utils import timeutils import ceilometer from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) class NoVolumeException(Exception): pass class GenericComputePollster(plugin_base.PollsterBase): """This class aims to cache instance statistics data The first polled pollster that inherits from this class retrieves and caches the stats of an instance; the other pollsters then build their samples from that cache without querying the backend again. """ sample_name = None sample_unit = '' sample_type = sample.TYPE_GAUGE sample_stats_key = None inspector_method = None def setup_environment(self): super(GenericComputePollster, self).setup_environment() self.inspector = self._get_inspector(self.conf) @staticmethod def aggregate_method(stats): # Don't aggregate anything by default return stats @classmethod def _get_inspector(cls, conf): # FIXME(sileht): This doesn't look threadsafe... try: inspector = cls._inspector except AttributeError: inspector = virt_inspector.get_hypervisor_inspector(conf) cls._inspector = inspector return inspector @property def default_discovery(self): return 'local_instances' def _record_poll_time(self): """Method records current time as the poll time. 
:return: time in seconds since the last poll time was recorded """ current_time = timeutils.utcnow() duration = None if hasattr(self, '_last_poll_time'): duration = timeutils.delta_seconds(self._last_poll_time, current_time) self._last_poll_time = current_time return duration @staticmethod def get_additional_metadata(instance, stats): pass @staticmethod def get_resource_id(instance, stats): return instance.id def _inspect_cached(self, cache, instance, duration): cache.setdefault(self.inspector_method, {}) if instance.id not in cache[self.inspector_method]: result = getattr(self.inspector, self.inspector_method)( instance, duration) polled_time = monotonic.monotonic() # Ensure we don't cache an iterator if isinstance(result, collections.Iterable): result = list(result) else: result = [result] cache[self.inspector_method][instance.id] = (polled_time, result) return cache[self.inspector_method][instance.id] def _stats_to_sample(self, instance, stats, polled_time): volume = getattr(stats, self.sample_stats_key) LOG.debug("%(instance_id)s/%(name)s volume: " "%(volume)s" % { 'name': self.sample_name, 'instance_id': instance.id, 'volume': (volume if volume is not None else 'Unavailable')}) if volume is None: raise NoVolumeException() return util.make_sample_from_instance( self.conf, instance, name=self.sample_name, unit=self.sample_unit, type=self.sample_type, resource_id=self.get_resource_id(instance, stats), volume=volume, additional_metadata=self.get_additional_metadata( instance, stats), monotonic_time=polled_time, ) def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: try: polled_time, result = self._inspect_cached( cache, instance, self._inspection_duration) if not result: continue for stats in self.aggregate_method(result): yield self._stats_to_sample(instance, stats, polled_time) except NoVolumeException: # FIXME(sileht): This should be removed... but I will # not change the test logic for now LOG.warning("%(name)s statistic is not available for " "instance %(instance_id)s" % {'name': self.sample_name, 'instance_id': instance.id}) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting sample of %(name)s: %(exc)s', {'instance_id': instance.id, 'name': self.sample_name, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s', {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) raise plugin_base.PollsterPermanentError(resources) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. 
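# NOTE: added clarification (behaviour assumed from plugin_base's polling contract): raising PollsterPermanentError with the full resource list, as done for NoDataException above and NotImplementedError here, lets the polling manager stop scheduling this pollster for these resources instead of retrying them every interval.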
LOG.debug('%(inspector)s does not provide data for ' '%(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) raise plugin_base.PollsterPermanentError(resources) except Exception as err: LOG.error( 'Could not get %(name)s events for %(id)s: %(e)s', { 'name': self.sample_name, 'id': instance.id, 'e': err}, exc_info=True) ceilometer-10.0.0/ceilometer/compute/pollsters/disk.py0000666000175100017510000001754013236733243023122 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_log import log from ceilometer.compute import pollsters from ceilometer import sample LOG = log.getLogger(__name__) AGGREGATED_DEPRECATION_DONE = set() class AggregateDiskPollster(pollsters.GenericComputePollster): inspector_method = "inspect_disks" def aggregate_method(self, result): fields = list(result[0]._fields) fields.remove("device") agg_stats = collections.defaultdict(int) devices = [] for stats in result: devices.append(stats.device) for f in fields: agg_stats[f] += getattr(stats, f) kwargs = dict(agg_stats) kwargs["device"] = devices return [result[0].__class__(**kwargs)] @staticmethod def get_additional_metadata(instance, stats): return {'device': stats.device} def get_samples(self, *args, **kwargs): if self.sample_name not in AGGREGATED_DEPRECATION_DONE: AGGREGATED_DEPRECATION_DONE.add(self.sample_name) LOG.warning("The %s metric is deprecated, instead use %s" % (self.sample_name, self.sample_name.replace("disk.", "disk.device."))) return super(AggregateDiskPollster, self).get_samples(*args, **kwargs) class PerDeviceDiskPollster(pollsters.GenericComputePollster): inspector_method = "inspect_disks" @staticmethod def get_resource_id(instance, stats): return "%s-%s" % (instance.id, stats.device) @staticmethod def get_additional_metadata(instance, stats): return {'disk_name': stats.device} class ReadRequestsPollster(AggregateDiskPollster): sample_name = 'disk.read.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_requests' class PerDeviceReadRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_requests' class ReadBytesPollster(AggregateDiskPollster): sample_name = 'disk.read.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_bytes' class PerDeviceReadBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_bytes' class WriteRequestsPollster(AggregateDiskPollster): sample_name = 'disk.write.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_requests' class PerDeviceWriteRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.requests' sample_unit = 'request' sample_type = 
sample.TYPE_CUMULATIVE sample_stats_key = 'write_requests' class WriteBytesPollster(AggregateDiskPollster): sample_name = 'disk.write.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_bytes' class PerDeviceWriteBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_bytes' class ReadBytesRatePollster(AggregateDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.read.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'read_bytes_rate' class PerDeviceReadBytesRatePollster(PerDeviceDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.device.read.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'read_bytes_rate' class ReadRequestsRatePollster(AggregateDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.read.requests.rate' sample_unit = 'request/s' sample_stats_key = 'read_requests_rate' class PerDeviceReadRequestsRatePollster(PerDeviceDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.device.read.requests.rate' sample_unit = 'request/s' sample_stats_key = 'read_requests_rate' class WriteBytesRatePollster(AggregateDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.write.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'write_bytes_rate' class PerDeviceWriteBytesRatePollster(PerDeviceDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.device.write.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'write_bytes_rate' class WriteRequestsRatePollster(AggregateDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.write.requests.rate' sample_unit = 'request/s' sample_stats_key = 'write_requests_rate' class PerDeviceWriteRequestsRatePollster(PerDeviceDiskPollster): inspector_method = "inspect_disk_rates" sample_name = 'disk.device.write.requests.rate' sample_unit = 'request/s' sample_stats_key = 'write_requests_rate' class DiskLatencyPollster(AggregateDiskPollster): inspector_method = 'inspect_disk_latency' sample_name = 'disk.latency' sample_unit = 'ms' sample_stats_key = 'disk_latency' class PerDeviceDiskLatencyPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_latency' sample_name = 'disk.device.latency' sample_unit = 'ms' sample_stats_key = 'disk_latency' class DiskIOPSPollster(AggregateDiskPollster): inspector_method = 'inspect_disk_iops' sample_name = 'disk.iops' sample_unit = 'count/s' sample_stats_key = 'iops_count' class PerDeviceDiskIOPSPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_iops' sample_name = 'disk.device.iops' sample_unit = 'count/s' sample_stats_key = 'iops_count' class CapacityPollster(AggregateDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.capacity' sample_unit = 'B' sample_stats_key = 'capacity' class PerDeviceCapacityPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.capacity' sample_unit = 'B' sample_stats_key = 'capacity' class AllocationPollster(AggregateDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.allocation' sample_unit = 'B' sample_stats_key = 'allocation' class PerDeviceAllocationPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.allocation' sample_unit = 'B' sample_stats_key = 'allocation' class PhysicalPollster(AggregateDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 
'disk.usage' sample_unit = 'B' sample_stats_key = 'physical' class PerDevicePhysicalPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.usage' sample_unit = 'B' sample_stats_key = 'physical' class PerDeviceDiskReadLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.latency' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'rd_total_times' class PerDeviceDiskWriteLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.latency' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'wr_total_times' ceilometer-10.0.0/ceilometer/compute/pollsters/instance_stats.py0000666000175100017510000000561013236733243025205 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer import sample class InstanceStatsPollster(pollsters.GenericComputePollster): inspector_method = 'inspect_instance' class CPUPollster(InstanceStatsPollster): sample_name = 'cpu' sample_unit = 'ns' sample_stats_key = 'cpu_time' sample_type = sample.TYPE_CUMULATIVE @staticmethod def get_additional_metadata(instance, c_data): return {'cpu_number': c_data.cpu_number} class CPUUtilPollster(InstanceStatsPollster): sample_name = 'cpu_util' sample_unit = '%' sample_stats_key = 'cpu_util' class MemoryUsagePollster(InstanceStatsPollster): sample_name = 'memory.usage' sample_unit = 'MB' sample_stats_key = 'memory_usage' class MemoryResidentPollster(InstanceStatsPollster): sample_name = 'memory.resident' sample_unit = 'MB' sample_stats_key = 'memory_resident' class MemorySwapInPollster(InstanceStatsPollster): sample_name = 'memory.swap.in' sample_unit = 'MB' sample_stats_key = 'memory_swap_in' sample_type = sample.TYPE_CUMULATIVE class MemorySwapOutPollster(InstanceStatsPollster): sample_name = 'memory.swap.out' sample_unit = 'MB' sample_stats_key = 'memory_swap_out' sample_type = sample.TYPE_CUMULATIVE class PerfCPUCyclesPollster(InstanceStatsPollster): sample_name = 'perf.cpu.cycles' sample_stats_key = 'cpu_cycles' class PerfInstructionsPollster(InstanceStatsPollster): sample_name = 'perf.instructions' sample_stats_key = 'instructions' class PerfCacheReferencesPollster(InstanceStatsPollster): sample_name = 'perf.cache.references' sample_stats_key = 'cache_references' class PerfCacheMissesPollster(InstanceStatsPollster): sample_name = 'perf.cache.misses' sample_stats_key = 'cache_misses' class MemoryBandwidthTotalPollster(InstanceStatsPollster): sample_name = 'memory.bandwidth.total' sample_unit = 'B/s' sample_stats_key = 'memory_bandwidth_total' class MemoryBandwidthLocalPollster(InstanceStatsPollster): sample_name = 'memory.bandwidth.local' sample_unit = 'B/s' sample_stats_key = 'memory_bandwidth_local' class CPUL3CachePollster(InstanceStatsPollster): sample_name = 'cpu_l3_cache' sample_unit = 'B' sample_stats_key = "cpu_l3_cache_usage" 
ceilometer-10.0.0/ceilometer/compute/pollsters/net.py0000666000175100017510000000664313236733243022760 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer import sample class NetworkPollster(pollsters.GenericComputePollster): inspector_method = "inspect_vnics" @staticmethod def get_additional_metadata(instance, stats): additional_stats = {k: getattr(stats, k) for k in ["name", "mac", "fref", "parameters"]} if stats.fref is not None: additional_stats['vnic_name'] = stats.fref else: additional_stats['vnic_name'] = stats.name return additional_stats @staticmethod def get_resource_id(instance, stats): if stats.fref is not None: return stats.fref else: instance_name = util.instance_name(instance) return "%s-%s-%s" % (instance_name, instance.id, stats.name) class IncomingBytesPollster(NetworkPollster): sample_name = 'network.incoming.bytes' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'B' sample_stats_key = 'rx_bytes' class IncomingPacketsPollster(NetworkPollster): sample_name = 'network.incoming.packets' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_packets' class OutgoingBytesPollster(NetworkPollster): sample_name = 'network.outgoing.bytes' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'B' sample_stats_key = 'tx_bytes' class OutgoingPacketsPollster(NetworkPollster): sample_name = 'network.outgoing.packets' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_packets' class IncomingBytesRatePollster(NetworkPollster): inspector_method = "inspect_vnic_rates" sample_name = 'network.incoming.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'rx_bytes_rate' class OutgoingBytesRatePollster(NetworkPollster): inspector_method = "inspect_vnic_rates" sample_name = 'network.outgoing.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'tx_bytes_rate' class IncomingDropPollster(NetworkPollster): sample_name = 'network.incoming.packets.drop' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_drop' class OutgoingDropPollster(NetworkPollster): sample_name = 'network.outgoing.packets.drop' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_drop' class IncomingErrorsPollster(NetworkPollster): sample_name = 'network.incoming.packets.error' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_errors' class OutgoingErrorsPollster(NetworkPollster): sample_name = 'network.outgoing.packets.error' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_errors' ceilometer-10.0.0/ceilometer/compute/discovery.py0000666000175100017510000002561213236733243022147 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from lxml import etree import operator import threading import cachetools from novaclient import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils try: import libvirt except ImportError: libvirt = None from ceilometer.compute.virt.libvirt import utils as libvirt_utils from ceilometer import nova_client from ceilometer.polling import plugin_base OPTS = [ cfg.StrOpt('instance_discovery_method', default='libvirt_metadata', choices=['naive', 'workload_partitioning', 'libvirt_metadata'], help="Ceilometer offers many methods to discover the instance " "running on a compute node: \n" "* naive: poll nova to get all instances\n" "* workload_partitioning: poll nova to get instances of " "the compute\n" "* libvirt_metadata: get instances from libvirt metadata " " but without instance metadata (recommended for Gnocchi " " backend)"), cfg.IntOpt('resource_update_interval', default=0, min=0, help="New instances will be discovered periodically based" " on this option (in seconds). By default, " "the agent discovers instances according to pipeline " "polling interval. If option is greater than 0, " "the instance list to poll will be updated based " "on this option's interval. Measurements relating " "to the instances will match intervals " "defined in pipeline. This option is only used " "for agent polling to Nova API, so it will work only " "when 'instance_discovery_method' is set to 'naive'."), cfg.IntOpt('resource_cache_expiry', default=3600, min=0, help="The expiry to totally refresh the instances resource " "cache, since the instance may be migrated to another " "host, we need to clean the legacy instances info in " "local cache by totally refreshing the local cache. " "The minimum should be the value of the config option " "of resource_update_interval. 
This option is only used " "for agent polling to Nova API, so it will work only " "when 'instance_discovery_method' is set to 'naive'.") ] LOG = log.getLogger(__name__) class NovaLikeServer(object): def __init__(self, **kwargs): self.id = kwargs.pop('id') for k, v in kwargs.items(): setattr(self, k, v) def __repr__(self): return '' % getattr(self, 'name', 'unknown-name') def __eq__(self, other): return self.id == other.id class InstanceDiscovery(plugin_base.DiscoveryBase): method = None def __init__(self, conf): super(InstanceDiscovery, self).__init__(conf) if not self.method: self.method = conf.compute.instance_discovery_method self.nova_cli = nova_client.Client(conf) self.expiration_time = conf.compute.resource_update_interval self.cache_expiry = conf.compute.resource_cache_expiry if self.method == "libvirt_metadata": # 4096 instances on a compute should be enough :) self._flavor_cache = cachetools.LRUCache(4096) else: self.lock = threading.Lock() self.instances = {} self.last_run = None self.last_cache_expire = None @property def connection(self): return libvirt_utils.refresh_libvirt_connection(self.conf, self) def discover(self, manager, param=None): """Discover resources to monitor.""" if self.method != "libvirt_metadata": return self.discover_nova_polling(manager, param=None) else: return self.discover_libvirt_polling(manager, param=None) @staticmethod def _safe_find_int(xml, path): elem = xml.find("./%s" % path) if elem is not None: return int(elem.text) return 0 @cachetools.cachedmethod(operator.attrgetter('_flavor_cache')) def get_flavor_id(self, name): try: return self.nova_cli.nova_client.flavors.find(name=name).id except exceptions.NotFound: return None @libvirt_utils.retry_on_disconnect def discover_libvirt_polling(self, manager, param=None): instances = [] for domain in self.connection.listAllDomains(): try: xml_string = domain.metadata( libvirt.VIR_DOMAIN_METADATA_ELEMENT, "http://openstack.org/xmlns/libvirt/nova/1.0") except libvirt.libvirtError as e: if libvirt_utils.is_disconnection_exception(e): # Re-raise the exception so it's handled and retries raise LOG.error( "Fail to get domain uuid %s metadata, libvirtError: %s", domain.UUIDString(), e.message) continue full_xml = etree.fromstring(domain.XMLDesc()) os_type_xml = full_xml.find("./os/type") metadata_xml = etree.fromstring(xml_string) # TODO(sileht): We don't have the flavor ID here So the Gnocchi # resource update will fail for compute sample (or put None ?) # We currently poll nova to get the flavor ID, but storing the # flavor_id doesn't have any sense because the flavor description # can change over the time, we should store the detail of the # flavor. 
this is why nova doesn't put the id in the libvirt # metadata # This implements flavor_xml = metadata_xml.find("./flavor") flavor = { "id": self.get_flavor_id(flavor_xml.attrib["name"]), "name": flavor_xml.attrib["name"], "vcpus": self._safe_find_int(flavor_xml, "vcpus"), "ram": self._safe_find_int(flavor_xml, "memory"), "disk": self._safe_find_int(flavor_xml, "disk"), "ephemeral": self._safe_find_int(flavor_xml, "ephemeral"), "swap": self._safe_find_int(flavor_xml, "swap"), } dom_state = domain.state()[0] vm_state = libvirt_utils.LIBVIRT_POWER_STATE.get(dom_state) status = libvirt_utils.LIBVIRT_STATUS.get(dom_state) user_id = metadata_xml.find("./owner/user").attrib["uuid"] project_id = metadata_xml.find("./owner/project").attrib["uuid"] # From: # https://github.com/openstack/nova/blob/852f40fd0c6e9d8878212ff3120556668023f1c4/nova/api/openstack/compute/views/servers.py#L214-L220 host_id = hashlib.sha224( (project_id + self.conf.host).encode('utf-8')).hexdigest() # The image description is partial, but Gnocchi only care about the # id, so we are fine image_xml = metadata_xml.find("./root[@type='image']") image = ({'id': image_xml.attrib['uuid']} if image_xml is not None else None) instance_data = { "id": domain.UUIDString(), "name": metadata_xml.find("./name").text, "flavor": flavor, "image": image, "os_type": os_type_xml.text, "architecture": os_type_xml.attrib["arch"], "OS-EXT-SRV-ATTR:instance_name": domain.name(), "OS-EXT-SRV-ATTR:host": self.conf.host, "OS-EXT-STS:vm_state": vm_state, "tenant_id": project_id, "user_id": user_id, "hostId": host_id, "status": status, # NOTE(sileht): Other fields that Ceilometer tracks # where we can't get the value here, but their are # retrieved by notification "metadata": {}, # "OS-EXT-STS:task_state" # 'reservation_id', # 'OS-EXT-AZ:availability_zone', # 'kernel_id', # 'ramdisk_id', # some image detail } LOG.debug("instance data: %s", instance_data) instances.append(NovaLikeServer(**instance_data)) return instances def discover_nova_polling(self, manager, param=None): secs_from_last_update = 0 utc_now = timeutils.utcnow(True) secs_from_last_expire = 0 if self.last_run: secs_from_last_update = timeutils.delta_seconds( self.last_run, utc_now) if self.last_cache_expire: secs_from_last_expire = timeutils.delta_seconds( self.last_cache_expire, utc_now) instances = [] # NOTE(ityaptin) we update make a nova request only if # it's a first discovery or resources expired with self.lock: if (not self.last_run or secs_from_last_update >= self.expiration_time): try: if (secs_from_last_expire < self.cache_expiry and self.last_run): since = self.last_run.isoformat() else: since = None self.instances.clear() self.last_cache_expire = utc_now instances = self.nova_cli.instance_get_all_by_host( self.conf.host, since) self.last_run = utc_now except Exception: # NOTE(zqfan): instance_get_all_by_host is wrapped and will # log exception when there is any error. It is no need to # raise it again and print one more time. 
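# NOTE: added for clarity (illustrative values, not project defaults except resource_cache_expiry): returning an empty list below skips this discovery cycle rather than serving the possibly stale self.instances cache; this nova-polling path is selected via the [compute] options declared in OPTS above, e.g. instance_discovery_method = naive, resource_update_interval = 600, resource_cache_expiry = 3600.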
return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in [ 'deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance return self.instances.values() @property def group_id(self): return self.conf.host ceilometer-10.0.0/ceilometer/compute/virt/0000775000175100017510000000000013236733440020541 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/xenapi/0000775000175100017510000000000013236733440022025 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/xenapi/__init__.py0000666000175100017510000000000013236733243024127 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/xenapi/inspector.py0000666000175100017510000001642413236733243024417 0ustar zuulzuul00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for XenAPI.""" from os_xenapi.client import session as xenapi_session from os_xenapi.client import XenAPI from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ LOG = logging.getLogger(__name__) opt_group = cfg.OptGroup(name='xenapi', title='Options for XenAPI') OPTS = [ cfg.StrOpt('connection_url', help='URL for connection to XenServer/Xen Cloud Platform.'), cfg.StrOpt('connection_username', default='root', help='Username for connection to XenServer/Xen Cloud ' 'Platform.'), cfg.StrOpt('connection_password', help='Password for connection to XenServer/Xen Cloud Platform.', secret=True), ] class XenapiException(virt_inspector.InspectorException): pass def get_api_session(conf): url = conf.xenapi.connection_url username = conf.xenapi.connection_username password = conf.xenapi.connection_password if not url or password is None: raise XenapiException(_('Must specify connection_url, and ' 'connection_password to use')) try: session = xenapi_session.XenAPISession(url, username, password, originator="ceilometer") LOG.debug("XenAPI session is created successfully, %s", session) except XenAPI.Failure as e: msg = _("Could not connect to XenAPI: %s") % e.details[0] raise XenapiException(msg) return session class XenapiInspector(virt_inspector.Inspector): def __init__(self, conf): super(XenapiInspector, self).__init__(conf) self.session = get_api_session(self.conf) def _lookup_by_name(self, instance_name): vm_refs = self.session.VM.get_by_name_label(instance_name) n = len(vm_refs) if n == 0: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in XenServer') % instance_name) elif n > 1: raise XenapiException( _('Multiple VM %s found in XenServer') % instance_name) else: return vm_refs[0] def inspect_instance(self, instance, duration): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) cpu_util = self._get_cpu_usage(vm_ref, instance_name) memory_usage = self._get_memory_usage(vm_ref) 
LOG.debug("inspect_instance, cpu_util: %(cpu)s, memory_usage: %(mem)s", {'cpu': cpu_util, 'mem': memory_usage}, instance=instance) return virt_inspector.InstanceStats(cpu_util=cpu_util, memory_usage=memory_usage) def _get_cpu_usage(self, vm_ref, instance_name): vcpus_number = int(self.session.VM.get_VCPUs_max(vm_ref)) if vcpus_number <= 0: msg = _("Could not get VM %s CPU number") % instance_name raise XenapiException(msg) cpu_util = 0.0 for index in range(vcpus_number): cpu_util += float(self.session.VM.query_data_source( vm_ref, "cpu%d" % index)) return cpu_util / int(vcpus_number) * 100 def _get_memory_usage(self, vm_ref): total_mem = float(self.session.VM.query_data_source(vm_ref, "memory")) try: free_mem = float(self.session.VM.query_data_source( vm_ref, "memory_internal_free")) except XenAPI.Failure: # If PV tools is not installed in the guest instance, it's # impossible to get free memory. So give it a default value # as 0. free_mem = 0 # memory provided from XenServer is in Bytes; # memory_internal_free provided from XenServer is in KB, # converting it to MB. return (total_mem - free_mem * units.Ki) / units.Mi def inspect_vnics(self, instance, duration): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) dom_id = self.session.VM.get_domid(vm_ref) vif_refs = self.session.VM.get_VIFs(vm_ref) bw_all = self.session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth') LOG.debug("inspect_vnics, all bandwidth: %s", bw_all, instance=instance) for vif_ref in vif_refs: vif_rec = self.session.VIF.get_record(vif_ref) bw_vif = bw_all[dom_id][vif_rec['device']] # TODO(jianghuaw): Currently the plugin can only support # rx_bytes and tx_bytes, so temporarily set others as -1. yield virt_inspector.InterfaceStats( name=vif_rec['uuid'], mac=vif_rec['MAC'], fref=None, parameters=None, rx_bytes=bw_vif['bw_in'], rx_packets=-1, rx_drop=-1, rx_errors=-1, tx_bytes=bw_vif['bw_out'], tx_packets=-1, tx_drop=-1, tx_errors=-1) def inspect_vnic_rates(self, instance, duration): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vif_refs = self.session.VM.get_VIFs(vm_ref) for vif_ref in vif_refs: vif_rec = self.session.VIF.get_record(vif_ref) rx_rate = float(self.session.VM.query_data_source( vm_ref, "vif_%s_rx" % vif_rec['device'])) tx_rate = float(self.session.VM.query_data_source( vm_ref, "vif_%s_tx" % vif_rec['device'])) yield virt_inspector.InterfaceRateStats( name=vif_rec['uuid'], mac=vif_rec['MAC'], fref=None, parameters=None, rx_bytes_rate=rx_rate, tx_bytes_rate=tx_rate) def inspect_disk_rates(self, instance, duration): instance_name = util.instance_name(instance) vm_ref = self._lookup_by_name(instance_name) vbd_refs = self.session.VM.get_VBDs(vm_ref) for vbd_ref in vbd_refs: vbd_rec = self.session.VBD.get_record(vbd_ref) read_rate = float(self.session.VM.query_data_source( vm_ref, "vbd_%s_read" % vbd_rec['device'])) write_rate = float(self.session.VM.query_data_source( vm_ref, "vbd_%s_write" % vbd_rec['device'])) yield virt_inspector.DiskRateStats( device=vbd_rec['device'], read_bytes_rate=read_rate, read_requests_rate=0, write_bytes_rate=write_rate, write_requests_rate=0) ceilometer-10.0.0/ceilometer/compute/virt/__init__.py0000666000175100017510000000000013236733243022643 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/vmware/0000775000175100017510000000000013236733440022042 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/vmware/vsphere_operations.py0000666000175100017510000002414213236733243026341 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. try: from oslo_vmware import vim_util except ImportError: # NOTE(sileht): this is safe because inspector will not load vim_util = None PERF_MANAGER_TYPE = "PerformanceManager" PERF_COUNTER_PROPERTY = "perfCounter" VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value' # ESXi Servers sample performance data every 20 seconds. 20-second interval # data is called instance data or real-time data. To retrieve instance data, # we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId" # property. In that case the "QueryPerf" method operates as a raw data feed # that bypasses the vCenter database and instead retrieves performance data # from an ESXi host. # The following value is time interval for real-time performance stats # in seconds and it is not configurable. VC_REAL_TIME_SAMPLING_INTERVAL = 20 class VsphereOperations(object): """Class to invoke vSphere APIs calls. vSphere APIs calls are required by various pollsters, collecting data from VMware infrastructure. """ def __init__(self, api_session, max_objects): self._api_session = api_session self._max_objects = max_objects # Mapping between "VM's Nova instance Id" -> "VM's managed object" # In case a VM is deployed by Nova, then its name is instance ID. # So this map essentially has VM names as keys. 
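        # Illustrative shape of the cache populated by
        # _init_vm_mobj_lookup_map() below (keys are Nova instance UUIDs used
        # as VM names, values are vSphere managed object references), e.g.
        #   {'b6f1...-uuid': <moref of a vim.VirtualMachine>, ...}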
self._vm_mobj_lookup_map = {} # Mapping from full name -> ID, for VC Performance counters self._perf_counter_id_lookup_map = None def _init_vm_mobj_lookup_map(self): session = self._api_session result = session.invoke_api(vim_util, "get_objects", session.vim, "VirtualMachine", self._max_objects, [VM_INSTANCE_ID_PROPERTY], False) while result: for object in result.objects: vm_mobj = object.obj # propSet will be set only if the server provides value if hasattr(object, 'propSet') and object.propSet: vm_instance_id = object.propSet[0].val if vm_instance_id: self._vm_mobj_lookup_map[vm_instance_id] = vm_mobj result = session.invoke_api(vim_util, "continue_retrieval", session.vim, result) def get_vm_mobj(self, vm_instance_id): """Method returns VC mobj of the VM by its NOVA instance ID.""" if vm_instance_id not in self._vm_mobj_lookup_map: self._init_vm_mobj_lookup_map() return self._vm_mobj_lookup_map.get(vm_instance_id, None) def _init_perf_counter_id_lookup_map(self): # Query details of all the performance counters from VC session = self._api_session client_factory = session.vim.client.factory perf_manager = session.vim.service_content.perfManager prop_spec = vim_util.build_property_spec( client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY]) obj_spec = vim_util.build_object_spec( client_factory, perf_manager, None) filter_spec = vim_util.build_property_filter_spec( client_factory, [prop_spec], [obj_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = 1 prop_collector = session.vim.service_content.propertyCollector result = session.invoke_api(session.vim, "RetrievePropertiesEx", prop_collector, specSet=[filter_spec], options=options) perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo # Extract the counter Id for each counter and populate the map self._perf_counter_id_lookup_map = {} for perf_counter_info in perf_counter_infos: counter_group = perf_counter_info.groupInfo.key counter_name = perf_counter_info.nameInfo.key counter_rollup_type = perf_counter_info.rollupType counter_id = perf_counter_info.key counter_full_name = (counter_group + ":" + counter_name + ":" + counter_rollup_type) self._perf_counter_id_lookup_map[counter_full_name] = counter_id def get_perf_counter_id(self, counter_full_name): """Method returns the ID of VC performance counter by its full name. A VC performance counter is uniquely identified by the tuple {'Group Name', 'Counter Name', 'Rollup Type'}. It will have an id - counter ID (changes from one VC to another), which is required to query performance stats from that VC. This method returns the ID for a counter, assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'. """ if not self._perf_counter_id_lookup_map: self._init_perf_counter_id_lookup_map() return self._perf_counter_id_lookup_map[counter_full_name] # TODO(akhils@vmware.com) Move this method to common library # when it gets checked-in def query_vm_property(self, vm_mobj, property_name): """Method returns the value of specified property for a VM. :param vm_mobj: managed object of the VM whose property is to be queried :param property_name: path of the property """ session = self._api_session return session.invoke_api(vim_util, "get_object_property", session.vim, vm_mobj, property_name) def query_vm_aggregate_stats(self, vm_mobj, counter_id, duration): """Method queries the aggregated real-time stat value for a VM. This method should be used for aggregate counters. 
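        For example, callers typically pass a counter_id resolved beforehand
        via get_perf_counter_id("cpu:usage:average") or
        get_perf_counter_id("mem:consumed:average") (counter names borrowed
        from the vSphere inspector module that drives this class).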
:param vm_mobj: managed object of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: the aggregated stats value for the counter """ # For aggregate counters, device_name should be "" stats = self._query_vm_perf_stats(vm_mobj, counter_id, "", duration) # Performance manager provides the aggregated stats value # with device name -> None return stats.get(None, 0) def query_vm_device_stats(self, vm_mobj, counter_id, duration): """Method queries the real-time stat values for a VM, for all devices. This method should be used for device(non-aggregate) counters. :param vm_mobj: managed object of the VM :param counter_id: id of the perf counter in VC :param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ # For device counters, device_name should be "*" to get stat values # for all devices. stats = self._query_vm_perf_stats(vm_mobj, counter_id, "*", duration) # For some device counters, in addition to the per device value # the Performance manager also returns the aggregated value. # Just to be consistent, deleting the aggregated value if present. stats.pop(None, None) return stats def _query_vm_perf_stats(self, vm_mobj, counter_id, device_name, duration): """Method queries the real-time stat values for a VM. :param vm_mobj: managed object of the VM for which stats are needed :param counter_id: id of the perf counter in VC :param device_name: name of the device for which stats are to be queried. For aggregate counters pass empty string (""). For device counters pass "*", if stats are required over all devices. :param duration: in seconds from current time, over which the stat value was applicable :return: a map containing the stat values keyed by the device ID/name """ session = self._api_session client_factory = session.vim.client.factory # Construct the QuerySpec metric_id = client_factory.create('ns0:PerfMetricId') metric_id.counterId = counter_id metric_id.instance = device_name query_spec = client_factory.create('ns0:PerfQuerySpec') query_spec.entity = vm_mobj query_spec.metricId = [metric_id] query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL # We query all samples which are applicable over the specified duration samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL) if duration and duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1) query_spec.maxSample = samples_cnt perf_manager = session.vim.service_content.perfManager perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager, querySpec=[query_spec]) stat_values = {} if perf_stats: entity_metric = perf_stats[0] sample_infos = entity_metric.sampleInfo if len(sample_infos) > 0: for metric_series in entity_metric.value: # Take the average of all samples to improve the accuracy # of the stat value and ignore -1 (bug 1639114) filtered = [i for i in metric_series.value if i != -1] stat_value = float(sum(filtered)) / len(filtered) device_id = metric_series.id.instance stat_values[device_id] = stat_value return stat_values ceilometer-10.0.0/ceilometer/compute/virt/vmware/__init__.py0000666000175100017510000000000013236733243024144 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/vmware/inspector.py0000777000175100017510000002043513236733243024434 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for VMware vSphere""" from oslo_config import cfg from oslo_utils import units import six from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import vsphere_operations from ceilometer.i18n import _ vmware_api = None opt_group = cfg.OptGroup(name='vmware', title='Options for VMware') OPTS = [ cfg.HostAddressOpt('host_ip', default='127.0.0.1', help='IP address of the VMware vSphere host.'), cfg.PortOpt('host_port', default=443, help='Port of the VMware vSphere host.'), cfg.StrOpt('host_username', default='', help='Username of VMware vSphere.'), cfg.StrOpt('host_password', default='', help='Password of VMware vSphere.', secret=True), cfg.StrOpt('ca_file', help='CA bundle file to use in verifying the vCenter server ' 'certificate.'), cfg.BoolOpt('insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. This option is ignored if ' '"ca_file" is set.'), cfg.IntOpt('api_retry_count', default=10, help='Number of times a VMware vSphere API may be retried.'), cfg.FloatOpt('task_poll_interval', default=0.5, help='Sleep time in seconds for polling an ongoing async ' 'task.'), cfg.StrOpt('wsdl_location', help='Optional vim service WSDL location ' 'e.g http:///vimService.wsdl. 
' 'Optional over-ride to default location for bug ' 'work-arounds.'), ] VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average' VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average' VC_NETWORK_RX_COUNTER = 'net:received:average' VC_NETWORK_TX_COUNTER = 'net:transmitted:average' VC_DISK_READ_RATE_CNTR = "disk:read:average" VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average" VC_DISK_WRITE_RATE_CNTR = "disk:write:average" VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average" def get_api_session(conf): global vmware_api if vmware_api is None: vmware_api = __import__('oslo_vmware.api') api_session = vmware_api.api.VMwareAPISession( conf.vmware.host_ip, conf.vmware.host_username, conf.vmware.host_password, conf.vmware.api_retry_count, conf.vmware.task_poll_interval, wsdl_loc=conf.vmware.wsdl_location, port=conf.vmware.host_port, cacert=conf.vmware.ca_file, insecure=conf.vmware.insecure) return api_session class VsphereInspector(virt_inspector.Inspector): def __init__(self, conf): super(VsphereInspector, self).__init__(conf) self._ops = vsphere_operations.VsphereOperations( get_api_session(self.conf), 1000) def _get_vm_mobj_not_power_off_or_raise(self, instance): vm_mobj = self._ops.get_vm_mobj(instance.id) if vm_mobj is None: raise virt_inspector.InstanceNotFoundException( _('VM %s not found in VMware vSphere') % instance.id) vm_powerState = self._ops.query_vm_property(vm_mobj, 'runtime.powerState') if vm_powerState == "poweredOff": raise virt_inspector.InstanceShutOffException( _('VM %s is poweredOff in VMware vSphere') % instance.id) return vm_mobj def inspect_vnic_rates(self, instance, duration): vm_mobj = self._get_vm_mobj_not_power_off_or_raise(instance) vnic_stats = {} vnic_ids = set() for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER): net_counter_id = self._ops.get_perf_counter_id(net_counter) vnic_id_to_stats_map = self._ops.query_vm_device_stats( vm_mobj, net_counter_id, duration) # The sample for this map is: {4000: 0.0, vmnic5: 0.0, vmnic4: 0.0, # vmnic3: 0.0, vmnic2: 0.0, vmnic1: 0.0, vmnic0: 0.0} # "4000" is the virtual nic which we need. # And these "vmnic*" are phynical nics in the host, so we remove it vnic_id_to_stats_map = {k: v for (k, v) in vnic_id_to_stats_map.items() if not k.startswith('vmnic')} vnic_stats[net_counter] = vnic_id_to_stats_map vnic_ids.update(six.iterkeys(vnic_id_to_stats_map)) # Stats provided from vSphere are in KB/s, converting it to B/s. for vnic_id in sorted(vnic_ids): rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER] .get(vnic_id, 0) * units.Ki) tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER] .get(vnic_id, 0) * units.Ki) yield virt_inspector.InterfaceRateStats( name=vnic_id, mac=None, fref=None, parameters=None, rx_bytes_rate=rx_bytes_rate, tx_bytes_rate=tx_bytes_rate) def inspect_disk_rates(self, instance, duration): vm_mobj = self._get_vm_mobj_not_power_off_or_raise(instance) disk_stats = {} disk_ids = set() disk_counters = [ VC_DISK_READ_RATE_CNTR, VC_DISK_READ_REQUESTS_RATE_CNTR, VC_DISK_WRITE_RATE_CNTR, VC_DISK_WRITE_REQUESTS_RATE_CNTR ] for disk_counter in disk_counters: disk_counter_id = self._ops.get_perf_counter_id(disk_counter) disk_id_to_stat_map = self._ops.query_vm_device_stats( vm_mobj, disk_counter_id, duration) disk_stats[disk_counter] = disk_id_to_stat_map disk_ids.update(six.iterkeys(disk_id_to_stat_map)) for disk_id in disk_ids: def stat_val(counter_name): return disk_stats[counter_name].get(disk_id, 0) # Stats provided from vSphere are in KB/s, converting it to B/s. 
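            # Worked example with illustrative numbers: a read rate of
            # 12 KB/s reported by vSphere is emitted below as
            # 12 * units.Ki = 12288 bytes/s.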
yield virt_inspector.DiskRateStats( device=disk_id, read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki, read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR), write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki, write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR) ) def inspect_instance(self, instance, duration): vm_mobj = self._get_vm_mobj_not_power_off_or_raise(instance) cpu_util_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_CPU_CONSUMED_CNTR) cpu_util = self._ops.query_vm_aggregate_stats( vm_mobj, cpu_util_counter_id, duration) # For this counter vSphere returns values scaled-up by 100, since the # corresponding API can't return decimals, but only longs. # For e.g. if the utilization is 12.34%, the value returned is 1234. # Hence, dividing by 100.0. cpu_util = cpu_util / 100.0 mem_counter_id = self._ops.get_perf_counter_id( VC_AVERAGE_MEMORY_CONSUMED_CNTR) memory = self._ops.query_vm_aggregate_stats( vm_mobj, mem_counter_id, duration) # Stat provided from vSphere is in KB, converting it to MB. memory = memory / units.Ki return virt_inspector.InstanceStats( cpu_util=cpu_util, memory_usage=memory) ceilometer-10.0.0/ceilometer/compute/virt/libvirt/0000775000175100017510000000000013236733440022214 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/libvirt/__init__.py0000666000175100017510000000000013236733243024316 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/libvirt/utils.py0000666000175100017510000001012413236733243023727 0ustar zuulzuul00000000000000# # Copyright 2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
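# NOTE: illustrative usage sketch, not part of the original module; class and
# function names below are hypothetical. The helpers defined in this module
# are typically combined as follows:
#
#   class SomePollster(object):
#       @property
#       def connection(self):
#           # reuse a cached read-only connection, reconnecting if it died
#           return refresh_libvirt_connection(self.conf, self)
#
#   @retry_on_disconnect
#   def list_domains(pollster):
#       # retried once (two attempts in total) if libvirtd drops the link
#       return pollster.connection.listAllDomains()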
from oslo_config import cfg from oslo_log import log as logging import tenacity try: import libvirt except ImportError: libvirt = None from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('libvirt_type', default='kvm', choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'], help='Libvirt domain type.'), cfg.StrOpt('libvirt_uri', default='', help='Override the default libvirt URI ' '(which is dependent on libvirt_type).'), ] LIBVIRT_PER_TYPE_URIS = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///') # We don't use the libvirt constants in case of libvirt is not available VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 VIR_DOMAIN_PMSUSPENDED = 7 # Stolen from nova LIBVIRT_POWER_STATE = { VIR_DOMAIN_NOSTATE: 'pending', VIR_DOMAIN_RUNNING: 'running', VIR_DOMAIN_BLOCKED: 'running', VIR_DOMAIN_PAUSED: 'paused', VIR_DOMAIN_SHUTDOWN: 'shutdown', VIR_DOMAIN_SHUTOFF: 'shutdown', VIR_DOMAIN_CRASHED: 'crashed', VIR_DOMAIN_PMSUSPENDED: 'suspended', } # NOTE(sileht): This is a guessing of the nova # status, should be true 99.9% on the time, # but can be wrong during some transition state # like shelving/rescuing LIBVIRT_STATUS = { VIR_DOMAIN_NOSTATE: 'building', VIR_DOMAIN_RUNNING: 'active', VIR_DOMAIN_BLOCKED: 'active', VIR_DOMAIN_PAUSED: 'paused', VIR_DOMAIN_SHUTDOWN: 'stopped', VIR_DOMAIN_SHUTOFF: 'stopped', VIR_DOMAIN_CRASHED: 'error', VIR_DOMAIN_PMSUSPENDED: 'suspended', } def new_libvirt_connection(conf): if not libvirt: raise ImportError("python-libvirt module is missing") uri = (conf.libvirt_uri or LIBVIRT_PER_TYPE_URIS.get(conf.libvirt_type, 'qemu:///system')) LOG.debug('Connecting to libvirt: %s', uri) return libvirt.openReadOnly(uri) def refresh_libvirt_connection(conf, klass): connection = getattr(klass, '_libvirt_connection', None) if not connection or not connection.isAlive(): connection = new_libvirt_connection(conf) setattr(klass, '_libvirt_connection', connection) return connection def is_disconnection_exception(e): if not libvirt: return False return (isinstance(e, libvirt.libvirtError) and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)) retry_on_disconnect = tenacity.retry( retry=tenacity.retry_if_exception(is_disconnection_exception), stop=tenacity.stop_after_attempt(2)) def raise_nodata_if_unsupported(method): def inner(in_self, instance, *args, **kwargs): try: return method(in_self, instance, *args, **kwargs) except libvirt.libvirtError as e: # NOTE(sileht): At this point libvirt connection error # have been reraise as tenacity.RetryError() msg = _('Failed to inspect instance %(instance_uuid)s stats, ' 'can not get info from libvirt: %(error)s') % { "instance_uuid": instance.id, "error": e} raise virt_inspector.NoDataException(msg) return inner ceilometer-10.0.0/ceilometer/compute/virt/libvirt/inspector.py0000777000175100017510000002111213236733243024577 0ustar zuulzuul00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for libvirt.""" from lxml import etree from oslo_log import log as logging from oslo_utils import units import six try: import libvirt except ImportError: libvirt = None from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import utils as libvirt_utils from ceilometer.i18n import _ LOG = logging.getLogger(__name__) class LibvirtInspector(virt_inspector.Inspector): def __init__(self, conf): super(LibvirtInspector, self).__init__(conf) # NOTE(sileht): create a connection on startup self.connection @property def connection(self): return libvirt_utils.refresh_libvirt_connection(self.conf, self) def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) try: return self.connection.lookupByUUIDString(instance.id) except libvirt.libvirtError as ex: if libvirt_utils.is_disconnection_exception(ex): raise msg = _("Error from libvirt while looking up instance " ": " "[Error Code %(error_code)s] " "%(ex)s") % {'name': instance_name, 'id': instance.id, 'error_code': ex.get_error_code(), 'ex': ex} raise virt_inspector.InstanceNotFoundException(msg) except Exception as ex: raise virt_inspector.InspectorException(six.text_type(ex)) def _get_domain_not_shut_off_or_raise(self, instance): instance_name = util.instance_name(instance) domain = self._lookup_by_uuid(instance) state = domain.info()[0] if state == libvirt.VIR_DOMAIN_SHUTOFF: msg = _('Failed to inspect data of instance ' ', ' 'domain state is SHUTOFF.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.InstanceShutOffException(msg) return domain @libvirt_utils.retry_on_disconnect def inspect_vnics(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for iface in tree.findall('devices/interface'): target = iface.find('target') if target is not None: name = target.get('dev') else: continue mac = iface.find('mac') if mac is not None: mac_address = mac.get('address') else: continue fref = iface.find('filterref') if fref is not None: fref = fref.get('filter') params = dict((p.get('name').lower(), p.get('value')) for p in iface.findall('filterref/parameter')) dom_stats = domain.interfaceStats(name) yield virt_inspector.InterfaceStats(name=name, mac=mac_address, fref=fref, parameters=params, rx_bytes=dom_stats[0], rx_packets=dom_stats[1], rx_errors=dom_stats[2], rx_drop=dom_stats[3], tx_bytes=dom_stats[4], tx_packets=dom_stats[5], tx_errors=dom_stats[6], tx_drop=dom_stats[7]) @staticmethod def _get_disk_devices(domain): tree = etree.fromstring(domain.XMLDesc(0)) return filter(bool, [target.get("dev") for target in tree.findall('devices/disk/target') if target.getparent().find('source') is not None]) @libvirt_utils.retry_on_disconnect def inspect_disks(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) for device in self._get_disk_devices(domain): block_stats = domain.blockStats(device) block_stats_flags = domain.blockStatsFlags(device, 0) yield virt_inspector.DiskStats( 
device=device, read_requests=block_stats[0], read_bytes=block_stats[1], write_requests=block_stats[2], write_bytes=block_stats[3], errors=block_stats[4], wr_total_times=block_stats_flags['wr_total_times'], rd_total_times=block_stats_flags['rd_total_times']) @libvirt_utils.retry_on_disconnect def inspect_disk_info(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) for device in self._get_disk_devices(domain): block_info = domain.blockInfo(device) yield virt_inspector.DiskInfo(device=device, capacity=block_info[0], allocation=block_info[1], physical=block_info[2]) @libvirt_utils.raise_nodata_if_unsupported @libvirt_utils.retry_on_disconnect def inspect_instance(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) memory_used = memory_resident = None memory_swap_in = memory_swap_out = None memory_stats = domain.memoryStats() # Stat provided from libvirt is in KB, converting it to MB. if 'available' in memory_stats and 'unused' in memory_stats: memory_used = (memory_stats['available'] - memory_stats['unused']) / units.Ki if 'rss' in memory_stats: memory_resident = memory_stats['rss'] / units.Ki if 'swap_in' in memory_stats and 'swap_out' in memory_stats: memory_swap_in = memory_stats['swap_in'] / units.Ki memory_swap_out = memory_stats['swap_out'] / units.Ki # TODO(sileht): stats also have the disk/vnic info # we could use that instead of the old method for Queen stats = self.connection.domainListGetStats([domain], 0)[0][1] cpu_time = 0 current_cpus = stats.get('vcpu.current') # Iterate over the maximum number of CPUs here, and count the # actual number encountered, since the vcpu.x structure can # have holes according to # https://libvirt.org/git/?p=libvirt.git;a=blob;f=src/libvirt-domain.c # virConnectGetAllDomainStats() for vcpu in six.moves.range(stats.get('vcpu.maximum', 0)): try: cpu_time += (stats.get('vcpu.%s.time' % vcpu) + stats.get('vcpu.%s.wait' % vcpu)) current_cpus -= 1 except TypeError: # pass here, if there are too many holes, the cpu count will # not match, so don't need special error handling. pass if current_cpus: # There wasn't enough data, so fall back cpu_time = stats.get('cpu.time') return virt_inspector.InstanceStats( cpu_number=stats.get('vcpu.current'), cpu_time=cpu_time, memory_usage=memory_used, memory_resident=memory_resident, memory_swap_in=memory_swap_in, memory_swap_out=memory_swap_out, cpu_cycles=stats.get("perf.cpu_cycles"), instructions=stats.get("perf.instructions"), cache_references=stats.get("perf.cache_references"), cache_misses=stats.get("perf.cache_misses"), memory_bandwidth_total=stats.get("perf.mbmt"), memory_bandwidth_local=stats.get("perf.mbml"), cpu_l3_cache_usage=stats.get("perf.cmt"), ) ceilometer-10.0.0/ceilometer/compute/virt/hyperv/0000775000175100017510000000000013236733440022056 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/hyperv/__init__.py0000666000175100017510000000000013236733243024160 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/compute/virt/hyperv/inspector.py0000666000175100017510000001341713236733243024447 0ustar zuulzuul00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for Hyper-V""" import collections import functools import sys from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_utils import units import six from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector def convert_exceptions(function, exception_map): expected_exceptions = tuple(exception_map.keys()) @functools.wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except expected_exceptions as ex: # exception might be a subclass of an expected exception. for expected in expected_exceptions: if isinstance(ex, expected): raised_exception = exception_map[expected] break exc_info = sys.exc_info() # NOTE(claudiub): Python 3 raises the exception object given as # the second argument in six.reraise. # The original message will be maintained by passing the original # exception. exc = raised_exception(six.text_type(exc_info[1])) six.reraise(raised_exception, exc, exc_info[2]) return wrapper def decorate_all_methods(decorator, *args, **kwargs): def decorate(cls): for attr in cls.__dict__: class_member = getattr(cls, attr) if callable(class_member): setattr(cls, attr, decorator(class_member, *args, **kwargs)) return cls return decorate exception_conversion_map = collections.OrderedDict([ # NOTE(claudiub): order should be from the most specialized exception type # to the most generic exception type. # (expected_exception, converted_exception) (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException), (os_win_exc.OSWinException, virt_inspector.InspectorException), ]) # NOTE(claudiub): the purpose of the decorator below is to prevent any # os_win exceptions (subclasses of OSWinException) to leak outside of the # HyperVInspector. @decorate_all_methods(convert_exceptions, exception_conversion_map) class HyperVInspector(virt_inspector.Inspector): def __init__(self, conf): super(HyperVInspector, self).__init__(conf) self._utils = utilsfactory.get_metricsutils() self._host_max_cpu_clock = self._compute_host_max_cpu_clock() def _compute_host_max_cpu_clock(self): hostutils = utilsfactory.get_hostutils() # host's number of CPUs and CPU clock speed will not change. 
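        # Worked example with illustrative figures: a host exposing 8 CPUs at
        # 2400 MHz gives a max clock of 8 * 2400 = 19200.0, so a guest
        # currently consuming 4800 MHz of clock is reported by
        # inspect_instance() with cpu_percent_used = 4800 / 19200.0 = 0.25.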
cpu_info = hostutils.get_cpus_info() host_cpu_count = len(cpu_info) host_cpu_clock = cpu_info[0]['MaxClockSpeed'] return float(host_cpu_clock * host_cpu_count) def inspect_instance(self, instance, duration): instance_name = util.instance_name(instance) (cpu_clock_used, cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name) cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock # Nanoseconds cpu_time = (int(uptime * cpu_percent_used) * units.k) memory_usage = self._utils.get_memory_metrics(instance_name) return virt_inspector.InstanceStats( cpu_number=cpu_count, cpu_time=cpu_time, memory_usage=memory_usage) def inspect_vnics(self, instance, duration): instance_name = util.instance_name(instance) for vnic_metrics in self._utils.get_vnic_metrics(instance_name): yield virt_inspector.InterfaceStats( name=vnic_metrics["element_name"], mac=vnic_metrics["address"], fref=None, parameters=None, rx_bytes=vnic_metrics['rx_mb'] * units.Mi, rx_packets=0, rx_drop=0, rx_errors=0, tx_bytes=vnic_metrics['tx_mb'] * units.Mi, tx_packets=0, tx_drop=0, tx_errors=0) def inspect_disks(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_metrics(instance_name): yield virt_inspector.DiskStats( device=disk_metrics['instance_id'], read_requests=0, # Return bytes read_bytes=disk_metrics['read_mb'] * units.Mi, write_requests=0, write_bytes=disk_metrics['write_mb'] * units.Mi, errors=0, wr_total_times=0, rd_total_times=0) def inspect_disk_latency(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_latency_metrics( instance_name): yield virt_inspector.DiskLatencyStats( device=disk_metrics['instance_id'], disk_latency=disk_metrics['disk_latency'] / 1000) def inspect_disk_iops(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_iops_count(instance_name): yield virt_inspector.DiskIOPSStats( device=disk_metrics['instance_id'], iops_count=disk_metrics['iops_count']) ceilometer-10.0.0/ceilometer/compute/virt/inspector.py0000666000175100017510000002342513236733243023132 0ustar zuulzuul00000000000000# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hypervisors.""" import collections from oslo_config import cfg from oslo_log import log from stevedore import driver import ceilometer OPTS = [ cfg.StrOpt('hypervisor_inspector', default='libvirt', help='Inspector to use for inspecting the hypervisor layer. 
' 'Known inspectors are libvirt, hyperv, vsphere ' 'and xenapi.'), ] LOG = log.getLogger(__name__) # Named tuple representing instance statistics class InstanceStats(object): fields = [ 'cpu_number', # number: number of CPUs 'cpu_time', # time: cumulative CPU time 'cpu_util', # util: CPU utilization in percentage 'cpu_l3_cache_usage', # cachesize: Amount of CPU L3 cache used 'memory_usage', # usage: Amount of memory used 'memory_resident', # 'memory_swap_in', # memory swap in 'memory_swap_out', # memory swap out 'memory_bandwidth_total', # total: total system bandwidth from one # level of cache 'memory_bandwidth_local', # local: bandwidth of memory traffic for a # memory controller 'cpu_cycles', # cpu_cycles: the number of cpu cycles one # instruction needs 'instructions', # instructions: the count of instructions 'cache_references', # cache_references: the count of cache hits 'cache_misses', # cache_misses: the count of caches misses ] def __init__(self, **kwargs): for k in self.fields: setattr(self, k, kwargs.pop(k, None)) if kwargs: raise AttributeError( "'InstanceStats' object has no attributes '%s'" % kwargs) # Named tuple representing vNIC statistics. # # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # rx_bytes: number of received bytes # rx_packets: number of received packets # tx_bytes: number of transmitted bytes # tx_packets: number of transmitted packets # InterfaceStats = collections.namedtuple('InterfaceStats', ['name', 'mac', 'fref', 'parameters', 'rx_bytes', 'tx_bytes', 'rx_packets', 'tx_packets', 'rx_drop', 'tx_drop', 'rx_errors', 'tx_errors']) # Named tuple representing vNIC rate statistics. # # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # rx_bytes_rate: rate of received bytes # tx_bytes_rate: rate of transmitted bytes # InterfaceRateStats = collections.namedtuple('InterfaceRateStats', ['name', 'mac', 'fref', 'parameters', 'rx_bytes_rate', 'tx_bytes_rate']) # Named tuple representing disk statistics. # # read_bytes: number of bytes read # read_requests: number of read operations # write_bytes: number of bytes written # write_requests: number of write operations # errors: number of errors # DiskStats = collections.namedtuple('DiskStats', ['device', 'read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors', 'wr_total_times', 'rd_total_times']) # Named tuple representing disk rate statistics. # # read_bytes_rate: number of bytes read per second # read_requests_rate: number of read operations per second # write_bytes_rate: number of bytes written per second # write_requests_rate: number of write operations per second # DiskRateStats = collections.namedtuple('DiskRateStats', ['device', 'read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate']) # Named tuple representing disk latency statistics. # # disk_latency: average disk latency # DiskLatencyStats = collections.namedtuple('DiskLatencyStats', ['device', 'disk_latency']) # Named tuple representing disk iops statistics. # # iops: number of iops per second # DiskIOPSStats = collections.namedtuple('DiskIOPSStats', ['device', 'iops_count']) # Named tuple representing disk Information. 
# # capacity: capacity of the disk # allocation: allocation of the disk # physical: usage of the disk DiskInfo = collections.namedtuple('DiskInfo', ['device', 'capacity', 'allocation', 'physical']) # Exception types # class InspectorException(Exception): def __init__(self, message=None): super(InspectorException, self).__init__(message) class InstanceNotFoundException(InspectorException): pass class InstanceShutOffException(InspectorException): pass class NoDataException(InspectorException): pass # Main virt inspector abstraction layering over the hypervisor API. # class Inspector(object): def __init__(self, conf): self.conf = conf def inspect_instance(self, instance, duration): """Inspect the CPU statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the instance stats """ raise ceilometer.NotImplementedError def inspect_vnics(self, instance, duration): """Inspect the vNIC statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the number of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_vnic_rates(self, instance, duration): """Inspect the vNIC rate statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the rate of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_disks(self, instance, duration): """Inspect the disk statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written, and the error count """ raise ceilometer.NotImplementedError def inspect_disk_rates(self, instance, duration): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written per second, with the error count """ raise ceilometer.NotImplementedError def inspect_disk_latency(self, instance, duration): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the average disk latency """ raise ceilometer.NotImplementedError def inspect_disk_iops(self, instance, duration): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of iops per second """ raise ceilometer.NotImplementedError def inspect_disk_info(self, instance, duration): """Inspect the disk information for an instance. 
:param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk , capacity , allocation and usage """ raise ceilometer.NotImplementedError def get_hypervisor_inspector(conf): try: namespace = 'ceilometer.compute.virt' mgr = driver.DriverManager(namespace, conf.hypervisor_inspector, invoke_on_load=True, invoke_args=(conf, )) return mgr.driver except ImportError as e: LOG.error("Unable to load the hypervisor inspector: %s" % e) return Inspector(conf) ceilometer-10.0.0/ceilometer/compute/__init__.py0000666000175100017510000000000013236733243021657 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/notification.py0000666000175100017510000002603113236733243021146 0ustar zuulzuul00000000000000# # Copyright 2017 Red Hat, Inc. # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import threading import time import uuid from concurrent import futures import cotyledon from futurist import periodics from oslo_config import cfg from oslo_log import log import oslo_messaging from stevedore import named from tooz import coordination from ceilometer.i18n import _ from ceilometer import messaging from ceilometer import utils LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('pipeline_processing_queues', default=10, min=1, help='Number of queues to parallelize workload across. This ' 'value should be larger than the number of active ' 'notification agents for optimal results. WARNING: ' 'Once set, lowering this value may result in lost data.'), cfg.BoolOpt('ack_on_event_error', default=True, help='Acknowledge message when event persistence fails.'), cfg.BoolOpt('workload_partitioning', default=False, help='Enable workload partitioning, allowing multiple ' 'notification agents to be run simultaneously.'), cfg.MultiStrOpt('messaging_urls', default=[], secret=True, help="Messaging URLs to listen for notifications. " "Example: rabbit://user:pass@host1:port1" "[,user:pass@hostN:portN]/virtual_host " "(DEFAULT/transport_url is used if empty). This " "is useful when you have dedicate messaging nodes " "for each service, for example, all nova " "notifications go to rabbit-nova:5672, while all " "cinder notifications go to rabbit-cinder:5672."), cfg.IntOpt('batch_size', default=100, min=1, help='Number of notification messages to wait before ' 'publishing them. 
Batching is advised when transformations are ' 'applied in pipeline.'), cfg.IntOpt('batch_timeout', default=5, help='Number of seconds to wait before publishing samples ' 'when batch_size is not reached (None means indefinitely)'), cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='notification_workers', help='Number of workers for notification service, ' 'default value is 1.'), cfg.MultiStrOpt('pipelines', default=['meter', 'event'], help="Select which pipeline managers to enable to " " generate data"), ] EXCHANGES_OPTS = [ cfg.MultiStrOpt('notification_control_exchanges', default=['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'sahara', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'], deprecated_group='DEFAULT', deprecated_name="http_control_exchanges", help="Exchanges name to listen for notifications."), ] class NotificationService(cotyledon.Service): """Notification service. When running multiple agents, additional queuing sequence is required for inter process communication. Each agent has two listeners: one to listen to the main OpenStack queue and another listener(and notifier) for IPC to divide pipeline sink endpoints. Coordination should be enabled to have proper active/active HA. """ NOTIFICATION_NAMESPACE = 'ceilometer.notification.v2' def __init__(self, worker_id, conf, coordination_id=None): super(NotificationService, self).__init__(worker_id) self.startup_delay = worker_id self.conf = conf self.periodic = None self.shutdown = False self.listeners = [] # NOTE(kbespalov): for the pipeline queues used a single amqp host # hence only one listener is required self.pipeline_listener = None if self.conf.notification.workload_partitioning: # XXX uuid4().bytes ought to work, but it requires ascii for now coordination_id = (coordination_id or str(uuid.uuid4()).encode('ascii')) self.partition_coordinator = coordination.get_coordinator( self.conf.coordination.backend_url, coordination_id) self.partition_set = list(range( self.conf.notification.pipeline_processing_queues)) self.group_state = None else: self.partition_coordinator = None def get_targets(self): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected. """ topics = (self.conf.notification_topics if 'notification_topics' in self.conf else self.conf.oslo_messaging_notifications.topics) return [oslo_messaging.Target(topic=topic, exchange=exchange) for topic in set(topics) for exchange in set(self.conf.notification.notification_control_exchanges)] def _log_missing_pipeline(self, names): LOG.error(_('Could not load the following pipelines: %s'), names) def run(self): # Delay startup so workers are jittered time.sleep(self.startup_delay) super(NotificationService, self).run() self.coord_lock = threading.Lock() self.managers = [ext.obj for ext in named.NamedExtensionManager( namespace='ceilometer.notification.pipeline', names=self.conf.notification.pipelines, invoke_on_load=True, on_missing_entrypoints_callback=self._log_missing_pipeline, invoke_args=(self.conf, self.conf.notification.workload_partitioning))] self.transport = messaging.get_transport(self.conf) if self.conf.notification.workload_partitioning: self.partition_coordinator.start(start_heart=True) else: # FIXME(sileht): endpoint uses the notification_topics option # and it should not because this is an oslo_messaging option # not a ceilometer. 
Until we have something to get the # notification_topics in another way, we must create a transport # to ensure the option has been registered by oslo_messaging. messaging.get_notifier(self.transport, '') self._configure_main_queue_listeners() if self.conf.notification.workload_partitioning: # join group after all manager set up is configured self.hashring = self.partition_coordinator.join_partitioned_group( self.NOTIFICATION_NAMESPACE) @periodics.periodic(spacing=self.conf.coordination.check_watchers, run_immediately=True) def run_watchers(): self.partition_coordinator.run_watchers() if self.group_state != self.hashring.ring.nodes: self.group_state = self.hashring.ring.nodes.copy() self._refresh_agent() self.periodic = periodics.PeriodicWorker.create( [], executor_factory=lambda: futures.ThreadPoolExecutor(max_workers=10)) self.periodic.add(run_watchers) utils.spawn_thread(self.periodic.start) def _configure_main_queue_listeners(self): endpoints = [] for pipe_mgr in self.managers: endpoints.extend(pipe_mgr.get_main_endpoints()) targets = self.get_targets() urls = self.conf.notification.messaging_urls or [None] for url in urls: transport = messaging.get_transport(self.conf, url) # NOTE(gordc): ignore batching as we want pull # to maintain sequencing as much as possible. listener = messaging.get_batch_notification_listener( transport, targets, endpoints) listener.start( override_pool_size=self.conf.max_parallel_requests ) self.listeners.append(listener) def _refresh_agent(self): with self.coord_lock: if self.shutdown: # NOTE(sileht): We are going to shutdown we everything will be # stopped, we should not restart them return self._configure_pipeline_listener() def _configure_pipeline_listener(self): partitioned = list(filter( self.hashring.belongs_to_self, self.partition_set)) endpoints = [] for pipe_mgr in self.managers: endpoints.extend(pipe_mgr.get_interim_endpoints()) targets = [] for mgr, hash_id in itertools.product(self.managers, partitioned): topic = '-'.join([mgr.NOTIFICATION_IPC, mgr.pm_type, str(hash_id)]) LOG.debug('Listening to queue: %s', topic) targets.append(oslo_messaging.Target(topic=topic)) if self.pipeline_listener: self.kill_listeners([self.pipeline_listener]) self.pipeline_listener = messaging.get_batch_notification_listener( self.transport, targets, endpoints, batch_size=self.conf.notification.batch_size, batch_timeout=self.conf.notification.batch_timeout) # NOTE(gordc): set single thread to process data sequentially # if batching enabled. 
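        # For example, with the default batch_size of 100 the IPC listener
        # below runs with a pool size of 1 so batched samples keep their
        # ordering, whereas batch_size=1 falls back to
        # conf.max_parallel_requests worker threads.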
batch = (1 if self.conf.notification.batch_size > 1 else self.conf.max_parallel_requests) self.pipeline_listener.start(override_pool_size=batch) @staticmethod def kill_listeners(listeners): # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), # which stops new messages, and wait(), which processes remaining # messages and closes connection for listener in listeners: listener.stop() listener.wait() def terminate(self): self.shutdown = True if self.periodic: self.periodic.stop() self.periodic.wait() if self.partition_coordinator: self.partition_coordinator.stop() with self.coord_lock: if self.pipeline_listener: self.kill_listeners([self.pipeline_listener]) self.kill_listeners(self.listeners) super(NotificationService, self).terminate() ceilometer-10.0.0/ceilometer/transformer/0000775000175100017510000000000013236733440020443 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/transformer/conversions.py0000666000175100017510000003211313236733243023370 0ustar zuulzuul00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re from oslo_log import log from oslo_utils import timeutils import six from ceilometer.i18n import _ from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class BaseConversionTransformer(transformer.TransformerBase): """Transformer to derive conversion.""" grouping_keys = ['resource_id'] def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ self.source = source or {} self.target = target or {} super(BaseConversionTransformer, self).__init__(**kwargs) def _map(self, s, attr): """Apply the name or unit mapping if configured.""" mapped = None from_ = self.source.get('map_from') to_ = self.target.get('map_to') if from_ and to_: if from_.get(attr) and to_.get(attr): try: mapped = re.sub(from_[attr], to_[attr], getattr(s, attr)) except Exception: pass return mapped or self.target.get(attr, getattr(s, attr)) class DeltaTransformer(BaseConversionTransformer): """Transformer based on the delta of a sample volume.""" def __init__(self, target=None, growth_only=False, **kwargs): """Initialize transformer with configured parameters. 
:param growth_only: capture only positive deltas """ super(DeltaTransformer, self).__init__(target=target, **kwargs) self.growth_only = growth_only self.cache = {} def handle_sample(self, s): """Handle a sample, converting if necessary.""" key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp) if prev: prev_volume = prev[0] prev_timestamp = prev[1] time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning('Dropping out of time order sample: %s', (s,)) # Reset the cache to the newer sample. self.cache[key] = prev return None volume_delta = s.volume - prev_volume if self.growth_only and volume_delta < 0: LOG.warning('Negative delta detected, dropping value') s = None else: s = self._convert(s, volume_delta) LOG.debug('Converted to: %s', s) else: LOG.warning('Dropping sample with no predecessor: %s', (s,)) s = None return s def _convert(self, s, delta): """Transform the appropriate sample fields.""" return sample.Sample( name=self._map(s, 'name'), unit=s.unit, type=sample.TYPE_DELTA, volume=delta, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) class ScalingTransformer(BaseConversionTransformer): """Transformer to apply a scaling conversion.""" def __init__(self, source=None, target=None, **kwargs): """Initialize transformer with configured parameters. :param source: dict containing source sample unit :param target: dict containing target sample name, type, unit and scaling factor (a missing value connotes no change) """ super(ScalingTransformer, self).__init__(source=source, target=target, **kwargs) self.scale = self.target.get('scale') self.max = self.target.get('max') LOG.debug('scaling conversion transformer with source:' ' %(source)s target: %(target)s:', {'source': self.source, 'target': self.target}) def _scale(self, s): """Apply the scaling factor. Either a straight multiplicative factor or else a string to be eval'd. """ ns = transformer.Namespace(s.as_dict()) scale = self.scale return ((eval(scale, {}, ns) if isinstance(scale, six.string_types) else s.volume * scale) if scale else s.volume) def _convert(self, s, growth=1): """Transform the appropriate sample fields.""" volume = self._scale(s) * growth return sample.Sample( name=self._map(s, 'name'), unit=self._map(s, 'unit'), type=self.target.get('type', s.type), volume=min(volume, self.max) if self.max else volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata ) def handle_sample(self, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) if self.source.get('unit', s.unit) == s.unit: s = self._convert(s) LOG.debug('converted to: %s', s) return s class RateOfChangeTransformer(ScalingTransformer): """Transformer based on the rate of change of a sample volume. For example, taking the current and previous volumes of a cumulative sample and producing a gauge value based on the proportion of some maximum used. 
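    A worked example with illustrative figures: if the cumulative cpu time of
    an instance grows from 10.0e9 ns to 13.0e9 ns between two samples taken
    60 seconds apart, handle_sample() computes a rate of change of
    (13.0e9 - 10.0e9) / 60 = 5.0e7 per second; a pipeline "scale" expression
    evaluated by the inherited ScalingTransformer, for instance something
    along the lines of "100.0 / (10**9 * (resource_metadata.cpu_number or
    1))", then turns that rate into a cpu_util value of 5.0 percent.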
""" def __init__(self, **kwargs): """Initialize transformer with configured parameters.""" super(RateOfChangeTransformer, self).__init__(**kwargs) self.cache = {} self.scale = self.scale or '1' def handle_sample(self, s): """Handle a sample, converting if necessary.""" LOG.debug('handling sample %s', s) key = s.name + s.resource_id prev = self.cache.get(key) timestamp = timeutils.parse_isotime(s.timestamp) self.cache[key] = (s.volume, timestamp, s.monotonic_time) if prev: prev_volume = prev[0] prev_timestamp = prev[1] prev_monotonic_time = prev[2] if (prev_monotonic_time is not None and s.monotonic_time is not None): # NOTE(sileht): Prefer high precision timer time_delta = s.monotonic_time - prev_monotonic_time else: time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) # disallow violations of the arrow of time if time_delta < 0: LOG.warning(_('dropping out of time order sample: %s'), (s,)) # Reset the cache to the newer sample. self.cache[key] = prev return None # we only allow negative volume deltas for noncumulative # samples, whereas for cumulative we assume that a reset has # occurred in the interim so that the current volume gives a # lower bound on growth volume_delta = (s.volume - prev_volume if (prev_volume <= s.volume or s.type != sample.TYPE_CUMULATIVE) else s.volume) rate_of_change = ((1.0 * volume_delta / time_delta) if time_delta else 0.0) s = self._convert(s, rate_of_change) LOG.debug('converted to: %s', s) else: LOG.warning(_('dropping sample with no predecessor: %s'), (s,)) s = None return s class AggregatorTransformer(ScalingTransformer): """Transformer that aggregates samples. Aggregation goes until a threshold or/and a retention_time, and then flushes them out into the wild. Example: To aggregate sample by resource_metadata and keep the resource_metadata of the latest received sample; AggregatorTransformer(retention_time=60, resource_metadata='last') To aggregate sample by user_id and resource_metadata and keep the user_id of the first received sample and drop the resource_metadata. 
AggregatorTransformer(size=15, user_id='first', resource_metadata='drop') To keep the timestamp of the last received sample rather than the first: AggregatorTransformer(timestamp="last") """ def __init__(self, size=1, retention_time=None, project_id=None, user_id=None, resource_metadata="last", timestamp="first", **kwargs): super(AggregatorTransformer, self).__init__(**kwargs) self.samples = {} self.counts = collections.defaultdict(int) self.size = int(size) if size else None self.retention_time = float(retention_time) if retention_time else None if not (self.size or self.retention_time): self.size = 1 if timestamp in ["first", "last"]: self.timestamp = timestamp else: self.timestamp = "first" self.initial_timestamp = None self.aggregated_samples = 0 self.key_attributes = [] self.merged_attribute_policy = {} self._init_attribute('project_id', project_id) self._init_attribute('user_id', user_id) self._init_attribute('resource_metadata', resource_metadata, is_droppable=True, mandatory=True) def _init_attribute(self, name, value, is_droppable=False, mandatory=False): drop = ['drop'] if is_droppable else [] if value or mandatory: if value not in ['last', 'first'] + drop: LOG.warning('%s is unknown (%s), using last' % (name, value)) value = 'last' self.merged_attribute_policy[name] = value else: self.key_attributes.append(name) def _get_unique_key(self, s): # NOTE(arezmerita): in samples generated by ceilometer middleware, # when accessing without authentication publicly readable/writable # swift containers, the project_id and the user_id are missing. # They will be replaced by for unique key construction. keys = ['' if getattr(s, f) is None else getattr(s, f) for f in self.key_attributes] non_aggregated_keys = "-".join(keys) # NOTE(sileht): it assumes, a meter always have the same unit/type return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys) def handle_sample(self, sample_): if not self.initial_timestamp: self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp) self.aggregated_samples += 1 key = self._get_unique_key(sample_) self.counts[key] += 1 if key not in self.samples: self.samples[key] = self._convert(sample_) if self.merged_attribute_policy[ 'resource_metadata'] == 'drop': self.samples[key].resource_metadata = {} else: if self.timestamp == "last": self.samples[key].timestamp = sample_.timestamp if sample_.type == sample.TYPE_CUMULATIVE: self.samples[key].volume = self._scale(sample_) else: self.samples[key].volume += self._scale(sample_) for field in self.merged_attribute_policy: if self.merged_attribute_policy[field] == 'last': setattr(self.samples[key], field, getattr(sample_, field)) def flush(self): if not self.initial_timestamp: return [] expired = (self.retention_time and timeutils.is_older_than(self.initial_timestamp, self.retention_time)) full = self.size and self.aggregated_samples >= self.size if full or expired: x = list(self.samples.values()) # gauge aggregates need to be averages for s in x: if s.type == sample.TYPE_GAUGE: key = self._get_unique_key(s) s.volume /= self.counts[key] self.samples.clear() self.counts.clear() self.aggregated_samples = 0 self.initial_timestamp = None return x return [] ceilometer-10.0.0/ceilometer/transformer/__init__.py0000666000175100017510000000446713236733243022572 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import six @six.add_metaclass(abc.ABCMeta) class TransformerBase(object): """Base class for plugins that transform the sample.""" def __init__(self, **kwargs): """Setup transformer. Each time a transformed is involved in a pipeline, a new transformer instance is created and chained into the pipeline. i.e. transformer instance is per pipeline. This helps if transformer need keep some cache and per-pipeline information. :param kwargs: The parameters that are defined in pipeline config file. """ super(TransformerBase, self).__init__() @abc.abstractmethod def handle_sample(self, sample): """Transform a sample. :param sample: A sample. """ @abc.abstractproperty def grouping_keys(self): """Keys used to group transformer.""" @staticmethod def flush(): """Flush samples cached previously.""" return [] class Namespace(object): """Encapsulates the namespace. Encapsulation is done by wrapping the evaluation of the configured rule. This allows nested dicts to be accessed in the attribute style, and missing attributes to yield false when used in a boolean expression. """ def __init__(self, seed): self.__dict__ = collections.defaultdict(lambda: Namespace({})) self.__dict__.update(seed) for k, v in six.iteritems(self.__dict__): if isinstance(v, dict): self.__dict__[k] = Namespace(v) def __getattr__(self, attr): return self.__dict__[attr] def __getitem__(self, key): return self.__dict__[key] def __nonzero__(self): return len(self.__dict__) > 0 __bool__ = __nonzero__ ceilometer-10.0.0/ceilometer/transformer/accumulator.py0000666000175100017510000000245213236733243023342 0ustar zuulzuul00000000000000# # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import transformer class TransformerAccumulator(transformer.TransformerBase): """Transformer that accumulates samples until a threshold. And then flushes them out into the wild. """ grouping_keys = ['resource_id'] def __init__(self, size=1, **kwargs): if size >= 1: self.samples = [] self.size = size super(TransformerAccumulator, self).__init__(**kwargs) def handle_sample(self, sample): if self.size >= 1: self.samples.append(sample) else: return sample def flush(self): if len(self.samples) >= self.size: x = self.samples self.samples = [] return x return [] ceilometer-10.0.0/ceilometer/transformer/arithmetic.py0000666000175100017510000001405613236733243023157 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import keyword import math import re from oslo_log import log import six from ceilometer.i18n import _ from ceilometer import sample from ceilometer import transformer LOG = log.getLogger(__name__) class ArithmeticTransformer(transformer.TransformerBase): """Multi meter arithmetic transformer. Transformer that performs arithmetic operations over one or more meters and/or their metadata. """ grouping_keys = ['resource_id'] meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)') def __init__(self, target=None, **kwargs): super(ArithmeticTransformer, self).__init__(**kwargs) target = target or {} self.target = target self.expr = target.get('expr', '') self.expr_escaped, self.escaped_names = self.parse_expr(self.expr) self.required_meters = list(self.escaped_names.values()) self.misconfigured = len(self.required_meters) == 0 if not self.misconfigured: self.reference_meter = self.required_meters[0] # convert to set for more efficient contains operation self.required_meters = set(self.required_meters) self.cache = collections.defaultdict(dict) self.latest_timestamp = None else: LOG.warning(_('Arithmetic transformer must use at least one' ' meter in expression \'%s\''), self.expr) def _update_cache(self, _sample): """Update the cache with the latest sample.""" escaped_name = self.escaped_names.get(_sample.name, '') if escaped_name not in self.required_meters: return self.cache[_sample.resource_id][escaped_name] = _sample def _check_requirements(self, resource_id): """Check if all the required meters are available in the cache.""" return len(self.cache[resource_id]) == len(self.required_meters) def _calculate(self, resource_id): """Evaluate the expression and return a new sample if successful.""" ns_dict = dict((m, s.as_dict()) for m, s in six.iteritems(self.cache[resource_id])) ns = transformer.Namespace(ns_dict) try: new_volume = eval(self.expr_escaped, {}, ns) if math.isnan(new_volume): raise ArithmeticError(_('Expression evaluated to ' 'a NaN value!')) reference_sample = self.cache[resource_id][self.reference_meter] return sample.Sample( name=self.target.get('name', reference_sample.name), unit=self.target.get('unit', reference_sample.unit), type=self.target.get('type', reference_sample.type), volume=float(new_volume), user_id=reference_sample.user_id, project_id=reference_sample.project_id, resource_id=reference_sample.resource_id, timestamp=self.latest_timestamp, resource_metadata=reference_sample.resource_metadata ) except Exception as e: LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'), {'expr': self.expr, 'exc': e}) def handle_sample(self, _sample): self._update_cache(_sample) self.latest_timestamp = _sample.timestamp def flush(self): new_samples = [] if not self.misconfigured: # When loop self.cache, the dict could not be change by others. # If changed, will raise "RuntimeError: dictionary changed size # during iteration". so we make a tmp copy and just loop it. 
tmp_cache = copy.copy(self.cache) for resource_id in tmp_cache: if self._check_requirements(resource_id): new_samples.append(self._calculate(resource_id)) if resource_id in self.cache: self.cache.pop(resource_id) return new_samples @classmethod def parse_expr(cls, expr): """Transforms meter names in the expression into valid identifiers. :param expr: unescaped expression :return: A tuple of the escaped expression and a dict representing the translation of meter names into Python identifiers """ class Replacer(object): """Replaces matched meter names with escaped names. If the meter name is not followed by parameter access in the expression, it defaults to accessing the 'volume' parameter. """ def __init__(self, original_expr): self.original_expr = original_expr self.escaped_map = {} def __call__(self, match): meter_name = match.group(1) escaped_name = self.escape(meter_name) self.escaped_map[meter_name] = escaped_name if (match.end(0) == len(self.original_expr) or self.original_expr[match.end(0)] != '.'): escaped_name += '.volume' return escaped_name @staticmethod def escape(name): has_dot = '.' in name if has_dot: name = name.replace('.', '_') if has_dot or name.endswith('ESC') or name in keyword.kwlist: name = "_" + name + '_ESC' return name replacer = Replacer(expr) expr = re.sub(cls.meter_name_re, replacer, expr) return expr, replacer.escaped_map ceilometer-10.0.0/ceilometer/hardware/0000775000175100017510000000000013236733440017676 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hardware/pollsters/0000775000175100017510000000000013236733440021725 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hardware/pollsters/data/0000775000175100017510000000000013236733440022636 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hardware/pollsters/data/snmp.yaml0000666000175100017510000001443013236733243024504 0ustar zuulzuul00000000000000--- # see http://www.circitor.fr/Mibs/Html/U/UCD-SNMP-MIB.php for reference. 
# http://www.circitor.fr/Mibs/Html/U/UCD-DISKIO-MIB.php for disk metrics metric: # cpu - name: hardware.cpu.load.1min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.1" type: "lambda x: float(str(x))" - name: hardware.cpu.load.5min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.2" type: "lambda x: float(str(x))" - name: hardware.cpu.load.15min unit: process type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.10.1.3.3" type: "lambda x: float(str(x))" - name: hardware.cpu.util unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.9.0" type: "int" # disk - name: hardware.disk.size.total unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.6" type: "int" metadata: &disk_metadata path: oid: "1.3.6.1.4.1.2021.9.1.2" type: "str" device: oid: "1.3.6.1.4.1.2021.9.1.3" type: "str" post_op: "_post_op_disk" - name: hardware.disk.size.used unit: KB type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.9.1.8" type: "int" metadata: *disk_metadata post_op: "_post_op_disk" - name: hardware.disk.read.bytes unit: B type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.13.15.1.1.3" type: "int" metadata: &diskio_metadata device: oid: "1.3.6.1.4.1.2021.13.15.1.1.2" post_op: "_post_op_disk" - name: hardware.disk.write.bytes unit: B type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.13.15.1.1.4" type: "int" <<: *diskio_metadata post_op: "_post_op_disk" - name: hardware.disk.read.requests unit: requests type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.13.15.1.1.5" type: "int" <<: *diskio_metadata post_op: "_post_op_disk" - name: hardware.disk.write.requests unit: requests type: gauge snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.4.1.2021.13.15.1.1.6" type: "int" <<: *diskio_metadata post_op: "_post_op_disk" # memory - name: hardware.memory.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.5.0" type: "int" - name: hardware.memory.used unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.6.0" type: "int" post_op: "_post_op_memory_avail_to_used" - name: hardware.memory.swap.total unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.3.0" type: "int" - name: hardware.memory.swap.avail unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.4.0" type: "int" - name: hardware.memory.buffer unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.14.0" type: "int" - name: hardware.memory.cached unit: KB type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.4.15.0" type: "int" # network interface - name: hardware.network.incoming.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.10" type: "int" metadata: &net_metadata name: oid: "1.3.6.1.2.1.2.2.1.2" type: "str" speed: oid: "1.3.6.1.2.1.2.2.1.5" type: "lambda x: int(x) / 8" mac: oid: "1.3.6.1.2.1.2.2.1.6" type: "lambda x: x.prettyPrint().replace('0x', '')" post_op: "_post_op_net" - name: hardware.network.outgoing.bytes unit: B type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.16" type: "int" metadata: *net_metadata post_op: "_post_op_net" - name: 
hardware.network.outgoing.errors unit: packet type: cumulative snmp_inspector: matching_type: "type_prefix" oid: "1.3.6.1.2.1.2.2.1.20" type: "int" metadata: *net_metadata post_op: "_post_op_net" #network aggregate - name: hardware.network.ip.outgoing.datagrams unit: datagrams type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.2.1.4.10.0" type: "int" - name: hardware.network.ip.incoming.datagrams unit: datagrams type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.2.1.4.3.0" type: "int" #system stats - name: hardware.system_stats.cpu.idle unit: "%" type: gauge snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.11.0" type: "int" - name: hardware.system_stats.io.outgoing.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.57.0" type: "int" - name: hardware.system_stats.io.incoming.blocks unit: blocks type: cumulative snmp_inspector: matching_type: "type_exact" oid: "1.3.6.1.4.1.2021.11.58.0" type: "int" ceilometer-10.0.0/ceilometer/hardware/pollsters/util.py0000666000175100017510000000363413236733243023265 0ustar zuulzuul00000000000000# # Copyright 2013 ZHAW SoE # Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from six.moves.urllib import parse as urlparse from ceilometer import sample def get_metadata_from_host(host_url): return {'resource_url': urlparse.urlunsplit(host_url)} def make_resource_metadata(res_metadata=None, host_url=None): resource_metadata = dict() if res_metadata is not None: metadata = copy.copy(res_metadata) resource_metadata.update(metadata) resource_metadata.update(get_metadata_from_host(host_url)) return resource_metadata def make_sample_from_host(host_url, name, sample_type, unit, volume, project_id=None, user_id=None, resource_id=None, res_metadata=None, extra=None, name_prefix='hardware'): extra = extra or {} resource_metadata = make_resource_metadata(res_metadata, host_url) resource_metadata.update(extra) res_id = resource_id or extra.get('resource_id') or host_url.hostname if name_prefix: name = name_prefix + '.' + name return sample.Sample( name=name, type=sample_type, unit=unit, volume=volume, user_id=user_id or extra.get('user_id'), project_id=project_id or extra.get('project_id'), resource_id=res_id, resource_metadata=resource_metadata, source='hardware', ) ceilometer-10.0.0/ceilometer/hardware/pollsters/__init__.py0000666000175100017510000000000013236733243024027 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hardware/pollsters/generic.py0000666000175100017510000002053413236733243023722 0ustar zuulzuul00000000000000# # Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import pkg_resources from oslo_config import cfg from oslo_log import log from oslo_utils import netutils import six from ceilometer import declarative from ceilometer.hardware import inspector as insloader from ceilometer.hardware.pollsters import util from ceilometer.i18n import _ from ceilometer.polling import plugin_base from ceilometer import sample OPTS = [ cfg.StrOpt('meter_definitions_file', default="snmp.yaml", help="Configuration file for defining hardware snmp meters." ), ] LOG = log.getLogger(__name__) class MeterDefinition(object): required_fields = ['name', 'unit', 'type'] def __init__(self, definition_cfg): self.cfg = definition_cfg for fname, fval in self.cfg.items(): if (isinstance(fname, six.string_types) and (fname in self.required_fields or fname.endswith('_inspector'))): setattr(self, fname, fval) else: LOG.warning("Ignore unrecognized field %s", fname) for fname in self.required_fields: if not getattr(self, fname, None): raise declarative.MeterDefinitionException( _("Missing field %s") % fname, self.cfg) if self.type not in sample.TYPES: raise declarative.MeterDefinitionException( _("Unrecognized type value %s") % self.type, self.cfg) class GenericHardwareDeclarativePollster(plugin_base.PollsterBase): CACHE_KEY = 'hardware.generic' mapping = None def __init__(self, conf): super(GenericHardwareDeclarativePollster, self).__init__(conf) self.inspectors = {} def _update_meter_definition(self, definition): self.meter_definition = definition self.cached_inspector_params = {} @property def default_discovery(self): return 'tripleo_overcloud_nodes' @staticmethod def _parse_resource(res): """Parse resource from discovery. Either URL can be given or dict. Dict has to contain at least keys 'resource_id' and 'resource_url', all the dict keys will be stored as metadata. :param res: URL or dict containing all resource info. :return: parsed_url, resource_id, metadata Returns parsed URL used for SNMP query, unique identifier of the resource and metadata of the resource. """ parsed_url, resource_id, metadata = (None, None, None) if isinstance(res, dict): if 'resource_url' not in res or 'resource_id' not in res: LOG.error('Passed resource dict must contain keys ' 'resource_id and resource_url.') else: metadata = res parsed_url = netutils.urlsplit(res['resource_url']) resource_id = res['resource_id'] else: metadata = {} parsed_url = netutils.urlsplit(res) resource_id = res return parsed_url, resource_id, metadata def _get_inspector(self, parsed_url): if parsed_url.scheme not in self.inspectors: try: driver = insloader.get_inspector(parsed_url) self.inspectors[parsed_url.scheme] = driver except Exception as err: LOG.exception("Cannot load inspector %(name)s: %(err)s", dict(name=parsed_url.scheme, err=err)) raise return self.inspectors[parsed_url.scheme] def get_samples(self, manager, cache, resources=None): """Return an iterable of Sample instances from polling the resources. 
:param manager: The service manager invoking the plugin :param cache: A dictionary for passing data between plugins :param resources: end point to poll data from """ resources = resources or [] h_cache = cache.setdefault(self.CACHE_KEY, {}) sample_iters = [] # Get the meter identifiers to poll identifier = self.meter_definition.name for resource in resources: parsed_url, res, extra_metadata = self._parse_resource(resource) if parsed_url is None: LOG.error("Skip invalid resource %s", resource) continue ins = self._get_inspector(parsed_url) try: # Call hardware inspector to poll for the data i_cache = h_cache.setdefault(res, {}) # Prepare inspector parameters and cache it for performance param_key = parsed_url.scheme + '.' + identifier inspector_param = self.cached_inspector_params.get(param_key) if not inspector_param: param = getattr(self.meter_definition, parsed_url.scheme + '_inspector', {}) inspector_param = ins.prepare_params(param) self.cached_inspector_params[param_key] = inspector_param if identifier not in i_cache: i_cache[identifier] = list(ins.inspect_generic( host=parsed_url, cache=i_cache, extra_metadata=extra_metadata, param=inspector_param)) # Generate samples if i_cache[identifier]: sample_iters.append(self.generate_samples( parsed_url, i_cache[identifier])) except Exception as err: msg = ('inspector call failed for %(ident)s ' 'host %(host)s: %(err)s' % dict(ident=identifier, host=parsed_url.hostname, err=err)) if "timeout" in str(err): LOG.warning(msg) else: LOG.exception(msg) return itertools.chain(*sample_iters) def generate_samples(self, host_url, data): """Generate a list of Sample from the data returned by inspector :param host_url: host url of the endpoint :param data: list of data returned by the corresponding inspector """ samples = [] definition = self.meter_definition for (value, metadata, extra) in data: s = util.make_sample_from_host(host_url, name=definition.name, sample_type=definition.type, unit=definition.unit, volume=value, res_metadata=metadata, extra=extra, name_prefix=None) samples.append(s) return samples @classmethod def build_pollsters(cls, conf): if not cls.mapping: definition_cfg = declarative.load_definitions( conf, {}, conf.hardware.meter_definitions_file, pkg_resources.resource_filename(__name__, "data/snmp.yaml")) cls.mapping = load_definition(definition_cfg) pollsters = [] for name in cls.mapping: pollster = cls(conf) pollster._update_meter_definition(cls.mapping[name]) pollsters.append((name, pollster)) return pollsters def load_definition(config_def): mappings = {} for meter_def in config_def.get('metric', []): try: meter = MeterDefinition(meter_def) mappings[meter.name] = meter except declarative.DefinitionException as e: errmsg = "Error loading meter definition: %s" LOG.error(errmsg, e.brief_message) return mappings ceilometer-10.0.0/ceilometer/hardware/inspector/0000775000175100017510000000000013236733440021704 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/hardware/inspector/snmp.py0000666000175100017510000003233513236733243023244 0ustar zuulzuul00000000000000# # Copyright 2014 ZHAW SoE # Copyright 2014 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector for collecting data over SNMP""" import copy from oslo_log import log from pysnmp.entity.rfc3413.oneliner import cmdgen from pysnmp.proto import rfc1905 import six import six.moves.urllib.parse as urlparse from ceilometer.hardware.inspector import base LOG = log.getLogger(__name__) class SNMPException(Exception): pass def parse_snmp_return(ret, is_bulk=False): """Check the return value of snmp operations :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data) returned by pysnmp :param is_bulk: True if the ret value is from GetBulkRequest :return: a tuple of (err, data) err: True if error found, or False if no error found data: a string of error description if error found, or the actual return data of the snmp operation """ err = True (errIndication, errStatus, errIdx, varBinds) = ret if errIndication: data = errIndication elif errStatus: if is_bulk: varBinds = varBinds[-1] data = "%s at %s" % (errStatus.prettyPrint(), errIdx and varBinds[int(errIdx) - 1] or "?") else: err = False data = varBinds return err, data EXACT = 'type_exact' PREFIX = 'type_prefix' _auth_proto_mapping = { 'md5': cmdgen.usmHMACMD5AuthProtocol, 'sha': cmdgen.usmHMACSHAAuthProtocol, } _priv_proto_mapping = { 'des': cmdgen.usmDESPrivProtocol, 'aes128': cmdgen.usmAesCfb128Protocol, '3des': cmdgen.usm3DESEDEPrivProtocol, 'aes192': cmdgen.usmAesCfb192Protocol, 'aes256': cmdgen.usmAesCfb256Protocol, } _usm_proto_mapping = { 'auth_proto': ('authProtocol', _auth_proto_mapping), 'priv_proto': ('privProtocol', _priv_proto_mapping), } class SNMPInspector(base.Inspector): # Default port _port = 161 _CACHE_KEY_OID = "snmp_cached_oid" # NOTE: The following mapping has been moved to the yaml file identified # by the config options hardware.meter_definitions_file. However, we still # keep the description here for code reading purpose. """ The following mapping define how to construct (value, metadata, extra) returned by inspect_generic MAPPING = { 'identifier: { 'matching_type': EXACT or PREFIX, 'metric_oid': (oid, value_converter) 'metadata': { metadata_name1: (oid1, value_converter), metadata_name2: (oid2, value_converter), }, 'post_op': special func to modify the return data, }, } For matching_type of EXACT, each item in the above mapping will return exact one (value, metadata, extra) tuple. The value would be returned from SNMP request GetRequest for oid of 'metric_oid', the metadata dict would be constructed based on the returning from SNMP GetRequest for oids of 'metadata'. For matching_type of PREFIX, SNMP request GetBulkRequest would be sent to get values for oids of 'metric_oid' and 'metadata' of each item in the above mapping. And each item might return multiple (value, metadata, extra) tuples, e.g. 
Suppose we have the following mapping: MAPPING = { 'disk.size.total': { 'matching_type': PREFIX, 'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int) 'metadata': { 'device': ("1.3.6.1.4.1.2021.9.1.3", str), 'path': ("1.3.6.1.4.1.2021.9.1.2", str), }, 'post_op': None, }, and the SNMP have the following oid/value(s): { '1.3.6.1.4.1.2021.9.1.6.1': 19222656, '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2", '1.3.6.1.4.1.2021.9.1.2.1': "/" '1.3.6.1.4.1.2021.9.1.6.2': 808112, '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs", '1.3.6.1.4.1.2021.9.1.2.2': "/run", } So here we'll return 2 instances of (value, metadata, extra): (19222656, {'device': "/dev/sda2", 'path': "/"}, None) (808112, {'device': "tmpfs", 'path': "/run"}, None) The post_op is assumed to be implemented by new metric developer. It could be used to add additional special metadata(e.g. ip address), or it could be used to add information into extra dict to be returned to construct the pollster how to build final sample, e.g. extra.update('project_id': xy, 'user_id': zw) """ def _query_oids(self, host, oids, cache, is_bulk): # send GetRequest or GetBulkRequest to get oids values and # populate the values into cache authData = self._get_auth_strategy(host) transport = cmdgen.UdpTransportTarget((host.hostname, host.port or self._port)) oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) cmd_runner = cmdgen.CommandGenerator() if is_bulk: ret = cmd_runner.bulkCmd(authData, transport, 0, 100, *oids, lookupValues=True) else: ret = cmd_runner.getCmd(authData, transport, *oids, lookupValues=True) (error, data) = parse_snmp_return(ret, is_bulk) if error: raise SNMPException("An error occurred, oids %(oid)s, " "host %(host)s, %(err)s" % dict(oid=oids, host=host.hostname, err=data)) # save result into cache if is_bulk: for var_bind_table_row in data: for name, val in var_bind_table_row: oid_cache[str(name)] = val else: for name, val in data: oid_cache[str(name)] = val @staticmethod def find_matching_oids(oid_cache, oid, match_type, find_one=True): matched = [] if match_type == PREFIX: for key in oid_cache.keys(): if key.startswith(oid): matched.append(key) if find_one: break else: if oid in oid_cache: matched.append(oid) return matched @staticmethod def get_oid_value(oid_cache, oid_def, suffix='', host=None): oid, converter = oid_def value = oid_cache[oid + suffix] if converter: try: value = converter(value) except ValueError: if isinstance(value, rfc1905.NoSuchObject): LOG.debug("OID %s%s has no value" % ( oid, " on %s" % host.hostname if host else "")) return None return value @classmethod def construct_metadata(cls, oid_cache, meta_defs, suffix='', host=None): metadata = {} for key, oid_def in six.iteritems(meta_defs): metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix, host) return metadata @classmethod def _find_missing_oids(cls, meter_def, cache): # find oids have not been queried and cached new_oids = [] oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {}) # check metric_oid if not cls.find_matching_oids(oid_cache, meter_def['metric_oid'][0], meter_def['matching_type']): new_oids.append(meter_def['metric_oid'][0]) for metadata in meter_def['metadata'].values(): if not cls.find_matching_oids(oid_cache, metadata[0], meter_def['matching_type']): new_oids.append(metadata[0]) return new_oids def inspect_generic(self, host, cache, extra_metadata, param): # the snmp definition for the corresponding meter meter_def = param # collect oids that needs to be queried oids_to_query = self._find_missing_oids(meter_def, cache) # query oids and populate into caches if 
oids_to_query: self._query_oids(host, oids_to_query, cache, meter_def['matching_type'] == PREFIX) # construct (value, metadata, extra) oid_cache = cache[self._CACHE_KEY_OID] # find all oids which needed to construct final sample values # for matching type of EXACT, only 1 sample would be generated # for matching type of PREFIX, multiple samples could be generated oids_for_sample_values = self.find_matching_oids( oid_cache, meter_def['metric_oid'][0], meter_def['matching_type'], False) input_extra_metadata = extra_metadata for oid in oids_for_sample_values: suffix = oid[len(meter_def['metric_oid'][0]):] value = self.get_oid_value(oid_cache, meter_def['metric_oid'], suffix, host) # get the metadata for this sample value metadata = self.construct_metadata(oid_cache, meter_def['metadata'], suffix, host) extra_metadata = copy.deepcopy(input_extra_metadata) or {} # call post_op for special cases if meter_def['post_op']: func = getattr(self, meter_def['post_op'], None) if func: value = func(host, cache, meter_def, value, metadata, extra_metadata, suffix) yield (value, metadata, extra_metadata) def _post_op_memory_avail_to_used(self, host, cache, meter_def, value, metadata, extra, suffix): _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0" if _memory_total_oid not in cache[self._CACHE_KEY_OID]: self._query_oids(host, [_memory_total_oid], cache, False) total_value = self.get_oid_value(cache[self._CACHE_KEY_OID], (_memory_total_oid, int)) if total_value is None: return None return total_value - value def _post_op_net(self, host, cache, meter_def, value, metadata, extra, suffix): # add ip address into metadata _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2" oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) if not self.find_matching_oids(oid_cache, _interface_ip_oid, PREFIX): # populate the oid into cache self._query_oids(host, [_interface_ip_oid], cache, True) ip_addr = '' for k, v in six.iteritems(oid_cache): if k.startswith(_interface_ip_oid) and v == int(suffix[1:]): ip_addr = k.replace(_interface_ip_oid + ".", "") metadata.update(ip=ip_addr) # update resource_id for each nic interface self._suffix_resource_id(host, metadata, 'name', extra) return value def _post_op_disk(self, host, cache, meter_def, value, metadata, extra, suffix): self._suffix_resource_id(host, metadata, 'device', extra) return value @staticmethod def _suffix_resource_id(host, metadata, key, extra): prefix = metadata.get(key) if prefix: res_id = extra.get('resource_id') or host.hostname res_id = res_id + ".%s" % metadata.get(key) extra.update(resource_id=res_id) @staticmethod def _get_auth_strategy(host): options = urlparse.parse_qs(host.query) kwargs = {} for key in _usm_proto_mapping: opt = options.get(key, [None])[-1] value = _usm_proto_mapping[key][1].get(opt) if value: kwargs[_usm_proto_mapping[key][0]] = value priv_pass = options.get('priv_password', [None])[-1] if priv_pass: kwargs['privKey'] = priv_pass if host.password: kwargs['authKey'] = host.password if kwargs: auth_strategy = cmdgen.UsmUserData(host.username, **kwargs) else: auth_strategy = cmdgen.CommunityData(host.username or 'public') return auth_strategy def prepare_params(self, param): processed = {} processed['matching_type'] = param['matching_type'] processed['metric_oid'] = (param['oid'], eval(param['type'])) processed['post_op'] = param.get('post_op', None) processed['metadata'] = {} for k, v in six.iteritems(param.get('metadata', {})): processed['metadata'][k] = (v['oid'], eval(v['type'])) return processed 
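
For reference, here is a small standalone sketch (an editorial illustration, not part of the ceilometer source) of what SNMPInspector.prepare_params above produces for the hardware.cpu.load.1min entry defined in snmp.yaml: the "type" strings are eval'd into converter callables and each metadata entry becomes an (oid, converter) pair. The names prepare_params_sketch and cpu_load_def are invented for the example; the OID, matching type and converter string are copied from the YAML definition, and the '0.42' input is an arbitrary raw value.

def prepare_params_sketch(param):
    # simplified stand-in that mirrors SNMPInspector.prepare_params above
    processed = {
        'matching_type': param['matching_type'],
        'metric_oid': (param['oid'], eval(param['type'])),
        'post_op': param.get('post_op', None),
        'metadata': {},
    }
    for key, value in param.get('metadata', {}).items():
        processed['metadata'][key] = (value['oid'], eval(value['type']))
    return processed

# values copied from the hardware.cpu.load.1min snmp_inspector section of snmp.yaml
cpu_load_def = {
    'matching_type': 'type_exact',
    'oid': '1.3.6.1.4.1.2021.10.1.3.1',
    'type': 'lambda x: float(str(x))',
}

params = prepare_params_sketch(cpu_load_def)
print(params['metric_oid'][1]('0.42'))  # the eval'd converter turns the raw reading into the float 0.42
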
ceilometer-10.0.0/ceilometer/hardware/inspector/__init__.py0000666000175100017510000000171113236733243024020 0ustar zuulzuul00000000000000# # Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from stevedore import driver def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'): """Get inspector driver and load it. :param parsed_url: urlparse.SplitResult object for the inspector :param namespace: Namespace to use to look for drivers. """ loaded_driver = driver.DriverManager(namespace, parsed_url.scheme) return loaded_driver.driver() ceilometer-10.0.0/ceilometer/hardware/inspector/base.py0000666000175100017510000000306613236733243023200 0ustar zuulzuul00000000000000# # Copyright 2014 ZHAW SoE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hardware components""" import abc import six @six.add_metaclass(abc.ABCMeta) class Inspector(object): @abc.abstractmethod def inspect_generic(self, host, cache, extra_metadata, param): """A generic inspect function. :param host: the target host :param cache: cache passed from the pollster :param extra_metadata: extra dict to be used as metadata :param param: a dict of inspector specific param :return: an iterator of (value, metadata, extra) containing the sample value, metadata dict to construct sample's metadata, and extra dict of extra metadata to help constructing sample """ def prepare_params(self, param): """Parse the params to a format which the inspector itself recognizes. :param param: inspector params from meter definition file :return: a dict of param which the inspector recognized """ return {} ceilometer-10.0.0/ceilometer/hardware/discovery.py0000666000175100017510000001203613236733243022264 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
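
As a rough illustration of the Inspector contract defined in base.py above (an editorial sketch, not part of the ceilometer source): a hardware inspector driver only needs to yield (value, metadata, extra) tuples from inspect_generic, and get_inspector() in inspector/__init__.py loads drivers registered under the 'ceilometer.hardware.inspectors' entry-point namespace. FakeInspector and its constant reading are invented for the example; the snippet assumes the ceilometer package is importable, and a real driver would additionally be registered as an entry point so stevedore can find it.

from ceilometer.hardware.inspector import base

class FakeInspector(base.Inspector):
    """Toy inspector returning one constant reading for any host."""

    def inspect_generic(self, host, cache, extra_metadata, param):
        # one (value, metadata, extra) tuple per sample the pollster will build
        yield (42, {'device': 'fake0'}, extra_metadata or {})

    def prepare_params(self, param):
        # no driver-specific parameter parsing needed for this toy driver
        return param

for value, metadata, extra in FakeInspector().inspect_generic(None, {}, {}, None):
    print(value, metadata, extra)  # -> 42 {'device': 'fake0'} {}
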
from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer import nova_client from ceilometer.polling import plugin_base LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('url_scheme', default='snmp://', help='URL scheme to use for hardware nodes.'), cfg.StrOpt('readonly_user_name', default='ro_snmp_user', help='SNMPd user name of all nodes running in the cloud.'), cfg.StrOpt('readonly_user_password', default='password', help='SNMPd v3 authentication password of all the nodes ' 'running in the cloud.', secret=True), cfg.StrOpt('readonly_user_auth_proto', choices=['md5', 'sha'], help='SNMPd v3 authentication algorithm of all the nodes ' 'running in the cloud'), cfg.StrOpt('readonly_user_priv_proto', choices=['des', 'aes128', '3des', 'aes192', 'aes256'], help='SNMPd v3 encryption algorithm of all the nodes ' 'running in the cloud'), cfg.StrOpt('readonly_user_priv_password', help='SNMPd v3 encryption password of all the nodes ' 'running in the cloud.', secret=True), cfg.StrOpt('tripleo_network_name', default='ctlplane', help='Name of the control plane Tripleo network') ] class NodesDiscoveryTripleO(plugin_base.DiscoveryBase): def __init__(self, conf): super(NodesDiscoveryTripleO, self).__init__(conf) self.nova_cli = nova_client.Client(conf) self.last_run = None self.instances = {} def _make_resource_url(self, ip): hwconf = self.conf.hardware url = hwconf.url_scheme username = hwconf.readonly_user_name password = hwconf.readonly_user_password if username: url += username if password: url += ':' + password if username or password: url += '@' url += ip opts = ['auth_proto', 'priv_proto', 'priv_password'] query = "&".join(opt + "=" + hwconf['readonly_user_%s' % opt] for opt in opts if hwconf['readonly_user_%s' % opt]) if query: url += '?' + query return url def discover(self, manager, param=None): """Discover resources to monitor. instance_get_all will return all instances if last_run is None, and will return only the instances changed since the last_run time. """ try: instances = self.nova_cli.instance_get_all(self.last_run) except Exception: # NOTE(zqfan): instance_get_all is wrapped and will log exception # when there is any error. It is no need to raise it again and # print one more time. return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance self.last_run = timeutils.utcnow(True).isoformat() resources = [] for instance in self.instances.values(): addresses = instance.addresses.get( self.conf.hardware.tripleo_network_name) if addresses is None: # NOTE(sileht): This is not a tripleo undercloud instance, this # is a cheap detection if ironic node deployed by tripleo, but # nova don't expose anything more useful and we must not log a # ERROR when the instance is not a tripleo undercloud one. 
continue try: ip_address = addresses[0].get('addr') final_address = self._make_resource_url(ip_address) resource = { 'resource_id': instance.id, 'resource_url': final_address, 'mac_addr': addresses[0].get('OS-EXT-IPS-MAC:mac_addr'), 'image_id': instance.image['id'], 'flavor_id': instance.flavor['id'] } resources.append(resource) except KeyError: LOG.error("Couldn't obtain IP address of " "instance %s" % instance.id) return resources ceilometer-10.0.0/ceilometer/hardware/__init__.py0000666000175100017510000000000013236733243022000 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/declarative.py0000666000175100017510000001506313236733243020746 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from jsonpath_rw_ext import parser from oslo_log import log import six import yaml from ceilometer.i18n import _ LOG = log.getLogger(__name__) class DefinitionException(Exception): def __init__(self, message, definition_cfg): msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message) super(DefinitionException, self).__init__(msg) self.brief_message = message class MeterDefinitionException(DefinitionException): pass class EventDefinitionException(DefinitionException): pass class ResourceDefinitionException(DefinitionException): pass class Definition(object): JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() GETTERS_CACHE = {} def __init__(self, name, cfg, plugin_manager): self.cfg = cfg self.name = name self.plugin = None if isinstance(cfg, dict): if 'fields' not in cfg: raise DefinitionException( _("The field 'fields' is required for %s") % name, self.cfg) if 'plugin' in cfg: plugin_cfg = cfg['plugin'] if isinstance(plugin_cfg, six.string_types): plugin_name = plugin_cfg plugin_params = {} else: try: plugin_name = plugin_cfg['name'] except KeyError: raise DefinitionException( _('Plugin specified, but no plugin name supplied ' 'for %s') % name, self.cfg) plugin_params = plugin_cfg.get('parameters') if plugin_params is None: plugin_params = {} try: plugin_ext = plugin_manager[plugin_name] except KeyError: raise DefinitionException( _('No plugin named %(plugin)s available for ' '%(name)s') % dict( plugin=plugin_name, name=name), self.cfg) plugin_class = plugin_ext.plugin self.plugin = plugin_class(**plugin_params) fields = cfg['fields'] else: # Simple definition "foobar: jsonpath" fields = cfg if isinstance(fields, list): # NOTE(mdragon): if not a string, we assume a list. 
if len(fields) == 1: fields = fields[0] else: fields = '|'.join('(%s)' % path for path in fields) if isinstance(fields, six.integer_types): self.getter = fields else: try: self.getter = self.make_getter(fields) except Exception as e: raise DefinitionException( _("Parse error in JSONPath specification " "'%(jsonpath)s' for %(name)s: %(err)s") % dict(jsonpath=fields, name=name, err=e), self.cfg) def _get_path(self, match): if match.context is not None: for path_element in self._get_path(match.context): yield path_element yield str(match.path) def parse(self, obj, return_all_values=False): if callable(self.getter): values = self.getter(obj) else: return self.getter values = [match for match in values if return_all_values or match.value is not None] if self.plugin is not None: if return_all_values and not self.plugin.support_return_all_values: raise DefinitionException("Plugin %s don't allows to " "return multiple values" % self.cfg["plugin"]["name"], self.cfg) values_map = [('.'.join(self._get_path(match)), match.value) for match in values] values = [v for v in self.plugin.trait_values(values_map) if v is not None] else: values = [match.value for match in values if match is not None] if return_all_values: return values else: return values[0] if values else None def make_getter(self, fields): if fields in self.GETTERS_CACHE: return self.GETTERS_CACHE[fields] else: getter = self.JSONPATH_RW_PARSER.parse(fields).find self.GETTERS_CACHE[fields] = getter return getter def load_definitions(conf, defaults, config_file, fallback_file=None): """Setup a definitions from yaml config file.""" if not os.path.exists(config_file): config_file = conf.find_file(config_file) if not config_file and fallback_file is not None: LOG.debug("No Definitions configuration file found! " "Using default config.") config_file = fallback_file if config_file is not None: LOG.debug("Loading definitions configuration file: %s", config_file) with open(config_file) as cf: config = cf.read() try: definition_cfg = yaml.safe_load(config) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = (_("Invalid YAML syntax in Definitions file " "%(file)s at line: %(line)s, column: %(column)s.") % dict(file=config_file, line=mark.line + 1, column=mark.column + 1)) else: errmsg = (_("YAML error reading Definitions file " "%(file)s") % dict(file=config_file)) LOG.error(errmsg) raise else: LOG.debug("No Definitions configuration file found! " "Using default config.") definition_cfg = defaults LOG.info("Definitions: %s", definition_cfg) return definition_cfg ceilometer-10.0.0/ceilometer/tests/0000775000175100017510000000000013236733440017243 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/__init__.py0000666000175100017510000000000013236733243021345 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/base.py0000666000175100017510000000720213236733243020533 0ustar zuulzuul00000000000000# Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
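
A quick illustration of the declarative Definition class above (an editorial sketch, not part of the ceilometer source): in its simplest "name: jsonpath" form a definition is compiled with jsonpath_rw_ext, and parse() returns the first matching value from a notification body. The field name memory_mb and the sample payload are invented for the example; the snippet assumes ceilometer and jsonpath_rw_ext are importable.

from ceilometer import declarative

# simple "foobar: jsonpath" style definition; no plugin manager entries are needed here
defn = declarative.Definition('memory_mb', 'payload.memory_mb', plugin_manager={})

notification = {'payload': {'memory_mb': 512, 'instance_id': 'fake-uuid'}}
print(defn.parse(notification))  # -> 512
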
"""Test base classes. """ import functools import os import tempfile import fixtures import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base import six from testtools import testcase import yaml import ceilometer from ceilometer import messaging class BaseTestCase(base.BaseTestCase): def setup_messaging(self, conf, exchange=None): self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) conf.set_override("notification_driver", ["messaging"]) if not exchange: exchange = 'ceilometer' conf.set_override("control_exchange", exchange) # NOTE(sileht): Ensure a new oslo.messaging driver is loaded # between each tests self.transport = messaging.get_transport(conf, "fake://", cache=False) self.useFixture(fixtures.MockPatch( 'ceilometer.messaging.get_transport', return_value=self.transport)) def cfg2file(self, data): cfgfile = tempfile.NamedTemporaryFile(mode='w', delete=False) self.addCleanup(os.remove, cfgfile.name) cfgfile.write(yaml.safe_dump(data)) cfgfile.close() return cfgfile.name def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. This relies on assertAlmostEqual to avoid rounding problem, and only checks up the first microsecond values. """ return self.assertAlmostEqual( timeutils.delta_seconds(first, second), 0.0, places=5) def assertIsEmpty(self, obj): try: if len(obj) != 0: self.fail("%s is not empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertIsNotEmpty(self, obj): try: if len(obj) == 0: self.fail("%s is empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except ceilometer.NotImplementedError as e: raise testcase.TestSkipped(six.text_type(e)) return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) ceilometer-10.0.0/ceilometer/tests/unit/0000775000175100017510000000000013236733440020222 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/0000775000175100017510000000000013236733440021160 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/notifications/0000775000175100017510000000000013236733440024031 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/notifications/__init__.py0000666000175100017510000000000013236733243026133 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py0000666000175100017510000007403513236733243027405 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_ipmi. This data is provided as a sample of the data expected from the ipmitool driver in the Ironic project, which is the publisher of the notifications being tested. """ TEMPERATURE_DATA = { 'DIMM GH VR Temp (0x3b)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.6 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM GH VR Temp (0x3b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU1 VR Temp (0x36)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '32 (+/- 0.500) degrees C', 'Entity ID': '20.1 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU1 VR Temp (0x36)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM EF VR Temp (0x3a)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.5 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM EF VR Temp (0x3a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU2 VR Temp (0x37)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '31 (+/- 0.500) degrees C', 'Entity ID': '20.2 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU2 VR Temp (0x37)', 
'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'Ambient Temp (0x32)': { 'Status': 'ok', 'Sensor Reading': '25 (+/- 0) degrees C', 'Entity ID': '12.1 (Front Panel Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Upper non-critical': '43.000', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Upper non-recoverable': '50.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '46.000', 'Sensor ID': 'Ambient Temp (0x32)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'Mezz Card Temp (0x35)': { 'Status': 'Disabled', 'Sensor Reading': 'Disabled', 'Entity ID': '44.1 (I/O Module)', 'Event Message Control': 'Per-threshold', 'Upper non-critical': '70.000', 'Upper non-recoverable': '85.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'Mezz Card Temp (0x35)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'PCH Temp (0x3c)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '46 (+/- 0.500) degrees C', 'Entity ID': '45.1 (Processor/IO Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '93.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '103.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '98.000', 'Sensor ID': 'PCH Temp (0x3c)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM CD VR Temp (0x39)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '27 (+/- 0.500) degrees C', 'Entity ID': '20.4 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM CD VR Temp (0x39)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 2 Temp (0x34)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '30 (+/- 0) degrees C', 'Entity ID': '16.2 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 
'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 2 Temp (0x34)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM AB VR Temp (0x38)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '28 (+/- 0.500) degrees C', 'Entity ID': '20.3 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM AB VR Temp (0x38)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '38 (+/- 0) degrees C', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } CURRENT_DATA = { 'Avg Power (0x2e)': { 'Status': 'ok', 'Sensor Reading': '130 (+/- 0) Watts', 'Entity ID': '21.0 (Power Management)', 'Assertions Enabled': '', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'No Thresholds', 'Positive Hysteresis': 'Unspecified', 'Sensor Type (Analog)': 'Current', 'Negative Hysteresis': 'Unspecified', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Avg Power (0x2e)', 'Assertion Events': '', 'Minimum sensor range': '2550.000', 'Settable Thresholds': 'No Thresholds' } } FAN_DATA = { 'Fan 4A Tach (0x46)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4A Tach (0x46)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 5A Tach (0x48)': { 'Status': 'ok', 'Sensor Reading': '7140 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5A Tach (0x48)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': 
'4020.000' }, 'Fan 3A Tach (0x44)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3A Tach (0x44)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 1A Tach (0x40)': { 'Status': 'ok', 'Sensor Reading': '6960 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1A Tach (0x40)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3B Tach (0x45)': { 'Status': 'ok', 'Sensor Reading': '7104 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3B Tach (0x45)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2A Tach (0x42)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2A Tach (0x42)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 4B Tach (0x47)': { 'Status': 'ok', 'Sensor Reading': '7488 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4B Tach (0x47)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2B Tach (0x43)': { 'Status': 'ok', 'Sensor Reading': '7168 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': 
'', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2B Tach (0x43)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 5B Tach (0x49)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5B Tach (0x49)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 1B Tach (0x41)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1B Tach (0x41)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6B Tach (0x4b)': { 'Status': 'ok', 'Sensor Reading': '7616 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6B Tach (0x4b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6A Tach (0x4a)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6A Tach (0x4a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' } } VOLTAGE_DATA = { 'Planar 12V (0x18)': { 'Status': 'ok', 'Sensor Reading': '12.312 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.108', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '10.692', 'Negative Hysteresis': '0.108', 'Threshold Read Mask': 'lcr 
ucr', 'Upper critical': '13.446', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 12V (0x18)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '12.042' }, 'Planar 3.3V (0x16)': { 'Status': 'ok', 'Sensor Reading': '3.309 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.028', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '3.039', 'Negative Hysteresis': '0.028', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '3.564', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 3.3V (0x16)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.309' }, 'Planar VBAT (0x1c)': { 'Status': 'ok', 'Sensor Reading': '3.137 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lnc- lcr-', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Readable Thresholds': 'lcr lnc', 'Positive Hysteresis': '0.025', 'Deassertions Enabled': 'lnc- lcr-', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '2.095', 'Negative Hysteresis': '0.025', 'Lower non-critical': '2.248', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Planar VBAT (0x1c)', 'Settable Thresholds': 'lcr lnc', 'Threshold Read Mask': 'lcr lnc', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.010' }, 'Planar 5V (0x17)': { 'Status': 'ok', 'Sensor Reading': '5.062 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.045', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '4.475', 'Negative Hysteresis': '0.045', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '5.582', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 5V (0x17)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4.995' } } SENSOR_DATA = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': TEMPERATURE_DATA, 'Current': CURRENT_DATA, 'Fan': FAN_DATA, 'Voltage': VOLTAGE_DATA } } } EMPTY_PAYLOAD = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { } } } MISSING_SENSOR = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 
'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } BAD_SENSOR = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': 'some bad stuff', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } NO_SENSOR_ID = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', }, } } } } NO_NODE_ID = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', }, } } } } ceilometer-10.0.0/ceilometer/tests/unit/ipmi/notifications/test_ironic.py0000666000175100017510000002044313236733243026733 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for producing IPMI sample messages from notification events. """ import mock from oslotest import base from ceilometer.ipmi.notifications import ironic as ipmi from ceilometer import sample from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data class TestNotifications(base.BaseTestCase): def test_ipmi_temperature_notification(self): """Test IPMI Temperature sensor data. Based on the above ipmi_test_data the expected sample for a single temperature reading has:: * a resource_id composed from the node_uuid and Sensor ID * a name composed from 'hardware.ipmi.' and 'temperature' * a volume from the first chunk of the Sensor Reading * a unit from the last chunk of the Sensor Reading * some readings are skipped if the value is 'Disabled' * metadata with the node id """ processor = ipmi.TemperatureSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' ) test_counter = counters[resource_id] self.assertEqual(26.0, test_counter.volume) self.assertEqual('C', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.temperature', test_counter.name) self.assertEqual('hardware.ipmi.metrics.update', test_counter.resource_metadata['event_type']) self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', test_counter.resource_metadata['node']) def test_ipmi_current_notification(self): """Test IPMI Current sensor data. A single current reading is effectively the same as temperature, modulo "current". """ processor = ipmi.CurrentSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(1, len(counters), 'expected 1 current reading') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)' ) test_counter = counters[resource_id] self.assertEqual(130.0, test_counter.volume) self.assertEqual('W', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.current', test_counter.name) def test_ipmi_fan_notification(self): """Test IPMI Fan sensor data. A single fan reading is effectively the same as temperature, modulo "fan". """ processor = ipmi.FanSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(12, len(counters), 'expected 12 fan readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' ) test_counter = counters[resource_id] self.assertEqual(6900.0, test_counter.volume) self.assertEqual('RPM', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.fan', test_counter.name) def test_ipmi_voltage_notification(self): """Test IPMI Voltage sensor data. A single voltage reading is effectively the same as temperature, modulo "voltage".
""" processor = ipmi.VoltageSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(4, len(counters), 'expected 4 volate readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' ) test_counter = counters[resource_id] self.assertEqual(3.137, test_counter.volume) self.assertEqual('V', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.voltage', test_counter.name) def test_disabed_skips_metric(self): """Test that a meter which a disabled volume is skipped.""" processor = ipmi.TemperatureSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.SENSOR_DATA)]) self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' ) self.assertNotIn(resource_id, counters) def test_empty_payload_no_metrics_success(self): processor = ipmi.TemperatureSensorNotification(None, None) counters = dict([(counter.resource_id, counter) for counter in processor.build_sample( ipmi_test_data.EMPTY_PAYLOAD)]) self.assertEqual(0, len(counters), 'expected 0 readings') @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_data(self, mylog): processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.MISSING_SENSOR)) self.assertEqual( 'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' "missing 'Sensor Reading' in payload", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_sensor_data_malformed(self, mylog): processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.BAD_SENSOR)) self.assertEqual( 'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' 'unable to parse sensor reading: some bad stuff', messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_node_uuid(self, mylog): """Test for desired error message when 'node_uuid' missing. Presumably this will never happen given the way the data is created, but better defensive than dead. 
""" processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.NO_NODE_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'node_uuid'", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_id(self, mylog): """Test for desired error message when 'Sensor ID' missing.""" processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.NO_SENSOR_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'Sensor ID'", messages[0] ) ceilometer-10.0.0/ceilometer/tests/unit/ipmi/pollsters/0000775000175100017510000000000013236733440023207 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/pollsters/__init__.py0000666000175100017510000000000013236733243025311 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/pollsters/base.py0000666000175100017510000000524413236733243024503 0ustar zuulzuul00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import fixtures import mock import six from ceilometer.polling import manager from ceilometer import service from ceilometer.tests import base @six.add_metaclass(abc.ABCMeta) class TestPollsterBase(base.BaseTestCase): def setUp(self): super(TestPollsterBase, self).setUp() self.CONF = service.prepare_service([], []) def fake_data(self): """Fake data used for test.""" return None def fake_sensor_data(self, sensor_type): """Fake sensor data used for test.""" return None @abc.abstractmethod def make_pollster(self): """Produce right pollster for test.""" def _test_get_samples(self): nm = mock.Mock() nm.read_inlet_temperature.side_effect = self.fake_data nm.read_outlet_temperature.side_effect = self.fake_data nm.read_power_all.side_effect = self.fake_data nm.read_airflow.side_effect = self.fake_data nm.read_cups_index.side_effect = self.fake_data nm.read_cups_utilization.side_effect = self.fake_data nm.read_sensor_any.side_effect = self.fake_sensor_data # We should mock the pollster first before initialize the Manager # so that we don't trigger the sudo in pollsters' __init__(). 
self.useFixture(fixtures.MockPatch( 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', return_value=nm)) self.useFixture(fixtures.MockPatch( 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', return_value=nm)) self.mgr = manager.AgentManager(0, self.CONF, ['ipmi']) self.pollster = self.make_pollster() def _verify_metering(self, length, expected_vol=None, node=None): cache = {} resources = ['local_host'] samples = list(self.pollster.get_samples(self.mgr, cache, resources)) self.assertEqual(length, len(samples)) if expected_vol: self.assertTrue(any(s.volume == expected_vol for s in samples)) if node: self.assertTrue(any(s.resource_metadata['node'] == node for s in samples)) ceilometer-10.0.0/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py0000666000175100017510000000706013236733243026137 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.ipmi.pollsters import sensor from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data from ceilometer.tests.unit.ipmi.pollsters import base TEMPERATURE_SENSOR_DATA = { 'Temperature': ipmi_test_data.TEMPERATURE_DATA } CURRENT_SENSOR_DATA = { 'Current': ipmi_test_data.CURRENT_DATA } FAN_SENSOR_DATA = { 'Fan': ipmi_test_data.FAN_DATA } VOLTAGE_SENSOR_DATA = { 'Voltage': ipmi_test_data.VOLTAGE_DATA } MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] class TestTemperatureSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return TEMPERATURE_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(10, float(32), self.CONF.host) class TestMissingSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMalformedSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MALFORMED_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMissingSensorId(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_ID_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestFanSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return FAN_SENSOR_DATA def make_pollster(self): return sensor.FanSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(12, float(7140), self.CONF.host) class TestCurrentSensorPollster(base.TestPollsterBase): def 
fake_sensor_data(self, sensor_type): return CURRENT_SENSOR_DATA def make_pollster(self): return sensor.CurrentSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, float(130), self.CONF.host) class TestVoltageSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return VOLTAGE_SENSOR_DATA def make_pollster(self): return sensor.VoltageSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(4, float(3.309), self.CONF.host) ceilometer-10.0.0/ceilometer/tests/unit/ipmi/pollsters/test_node.py0000666000175100017510000001035413236733243025553 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.ipmi.pollsters import node from ceilometer.tests.unit.ipmi.pollsters import base class TestPowerPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['13', '00']} def make_pollster(self): return node.PowerPollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 19(0x13 as current_value) self._verify_metering(1, 19, self.CONF.host) class TestInletTemperaturePollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['23', '00']} def make_pollster(self): return node.InletTemperaturePollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 35(0x23 as current_value) self._verify_metering(1, 35, self.CONF.host) class TestOutletTemperaturePollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['25', '00']} def make_pollster(self): return node.OutletTemperaturePollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 37(0x25 as current_value) self._verify_metering(1, 37, self.CONF.host) class TestAirflowPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Current_value": ['be', '00']} def make_pollster(self): return node.AirflowPollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 190(0xbe as current_value) self._verify_metering(1, 190, self.CONF.host) class TestCUPSIndexPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"CUPS_Index": ['2e', '00']} def make_pollster(self): return node.CUPSIndexPollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 46(0x2e) self._verify_metering(1, 46, self.CONF.host) class CPUUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"CPU_Utilization": ['33', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.CPUUtilPollster(self.CONF) def test_get_samples(self):
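# NOTE: the eight-byte CPU_Utilization payload above appears to be little-endian, matching the raw-data comments in ipmitool_test_data.py (e.g. max '37 02' == 567), so only the low byte '33' contributes to the expected volume of 51.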
self._test_get_samples() # only one sample, and value is 51(0x33) self._verify_metering(1, 51, self.CONF.host) class MemUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"Mem_Utilization": ['05', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.MemUtilPollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 5(0x05) self._verify_metering(1, 5, self.CONF.host) class IOUtilPollster(base.TestPollsterBase): def fake_data(self): # data after parsing Intel Node Manager output return {"IO_Utilization": ['00', '00', '00', '00', '00', '00', '00', '00']} def make_pollster(self): return node.IOUtilPollster(self.CONF) def test_get_samples(self): self._test_get_samples() # only one sample, and value is 0(0x00) self._verify_metering(1, 0, self.CONF.host) ceilometer-10.0.0/ceilometer/tests/unit/ipmi/__init__.py0000666000175100017510000000000013236733243023262 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/0000775000175100017510000000000013236733440023004 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py0000666000175100017510000003223313236733243027250 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_intel_node_manager and test_ipmi_sensor.
This data is provided as a sample of the data expected from the ipmitool binary, which produce Node Manager/IPMI raw data """ sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Temperature Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB P1 VR Temp (0x20) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 25 (+/- 0) degrees C Status : ok Nominal Reading : 58.000 Normal Minimum : 10.000 Normal Maximum : 105.000 Upper critical : 115.000 Upper non-critical : 110.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : Front Panel Temp (0x21) Entity ID : 12.1 (Front Panel Board) Sensor Type (Analog) : Temperature Sensor Reading : 23 (+/- 0) degrees C Status : ok Nominal Reading : 28.000 Normal Minimum : 10.000 Normal Maximum : 45.000 Upper critical : 55.000 Upper non-critical : 50.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : SSB Temp (0x22) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 43 (+/- 0) degrees C Status : ok Nominal Reading : 52.000 Normal Minimum : 10.000 Normal Maximum : 93.000 Upper critical : 103.000 Upper non-critical : 98.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Voltage Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB +12.0V (0xd0) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 11.831 (+/- 0) Volts Status : ok Nominal Reading : 11.935 Normal Minimum : 11.363 Normal Maximum : 12.559 Upper critical : 13.391 Upper non-critical : 13.027 Lower critical : 10.635 Lower non-critical : 10.947 Positive Hysteresis : 0.052 Negative Hysteresis : 0.052 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +1.35 P1LV AB (0xe4) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : Disabled Status : Disabled 
Nominal Reading : 1.342 Normal Minimum : 1.275 Normal Maximum : 1.409 Upper critical : 1.488 Upper non-critical : 1.445 Lower critical : 1.201 Lower non-critical : 1.244 Positive Hysteresis : 0.006 Negative Hysteresis : 0.006 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Event Status : Unavailable Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +5.0V (0xd1) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 4.959 (+/- 0) Volts Status : ok Nominal Reading : 4.981 Normal Minimum : 4.742 Normal Maximum : 5.241 Upper critical : 5.566 Upper non-critical : 5.415 Lower critical : 4.416 Lower non-critical : 4.546 Positive Hysteresis : 0.022 Negative Hysteresis : 0.022 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) Entity ID : 10.1 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 11 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ Sensor ID : PS2 Curr Out % (0x59) Entity ID : 10.2 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 0 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ """ sensor_fan_data = """Sensor ID : System Fan 1 (0x30) Entity ID : 29.1 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 2 (0x32) Entity ID : 29.2 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : 
Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 3 (0x34) Entity ID : 29.3 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 4 (0x36) Entity ID : 29.4 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4606 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- """ sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' sdr_dump_cmd = 'ipmitoolsdrdump' sdr_info_cmd = 'ipmitoolsdrinfo' read_sensor_all_cmd = 'ipmitoolsdr-v' read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' device_id_cmd = 'ipmitoolraw0x060x01' nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') # start from byte 3, get cur- 57 00(87), min- 03 00(3) # max- 37 02(567), avg- 5c 00(92) power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' ' 9b 12 01 50\n', '') # start from byte 3, get cur- 17 00(23), min- 16 00(22) # max- 18 00(24), avg- 17 00(23) inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- 19 00(25), min- 18 00(24) # max- 1b 00(27), avg- 19 00(25) outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' ' b7 02 00 50\n', '') # start from byte 3, get cur- be 00(190), min- 96 00(150) # max- 26 02(550), avg- cb 00(203) airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n' ' b7 02 00 50\n', '') # start from byte 3, cups index 2e 00 (46) cups_index_data = (' 57 01 00 2e 00\n', '') # start from byte 3, get cup_util - 33 00 
...(51), mem_util - 05 00 ...(5) # io_util - 00 00 ...(0) cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' ' 00 00 00 00 00 00 00 00 00 00 00\n', '') sdr_info = ('', '') sensor_temperature = (sensor_temperature_data, '') sensor_voltage = (sensor_voltage_data, '') sensor_current = (sensor_current_data, '') sensor_fan = (sensor_fan_data, '') ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py0000666000175100017510000001075113236733243026753 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from ceilometer.ipmi.platform import ipmi_sensor from ceilometer.tests.unit.ipmi.platform import fake_utils from ceilometer import utils class TestIPMISensor(base.BaseTestCase): def setUp(self): super(TestIPMISensor, self).setUp() utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super(TestIPMISensor, cls).tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertTrue(self.ipmi.ipmi_support) # only temperature data returned. self.assertIn('Temperature', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Temperature'])) sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # only voltage data returned. self.assertIn('Voltage', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Voltage'])) sensor = sensors['Voltage']['BB +5.0V (0xd1)'] self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # only Current data returned. self.assertIn('Current', sensors) self.assertEqual(1, len(sensors)) # 2 sensor data in total. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(2, len(sensors['Current'])) sensor = sensors['Current']['PS1 Curr Out % (0x58)'] self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # only Fan data returned. self.assertIn('Fan', sensors) self.assertEqual(1, len(sensors)) # 2 sensor data in total. 
# Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(4, len(sensors['Fan'])) sensor = sensors['Fan']['System Fan 2 (0x32)'] self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) class TestNonIPMISensor(base.BaseTestCase): def setUp(self): super(TestNonIPMISensor, self).setUp() utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super(TestNonIPMISensor, cls).tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertFalse(self.ipmi.ipmi_support) # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # Non-IPMI platform return empty data self.assertEqual({}, sensors) ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/__init__.py0000666000175100017510000000000013236733243025106 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/fake_utils.py0000666000175100017510000000747213236733243025521 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
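# NOTE: this module fakes ceilometer.utils.execute for the platform tests: _execute() joins the ipmitool argument list into a single lookup key and returns the canned (stdout, stderr) tuples from ipmitool_test_data, while the execute_with_nm_v2/execute_with_nm_v3, execute_without_nm and execute_without_ipmi variants emulate Node Manager 2.0/3.0, non-Node Manager and non-IPMI platforms respectively.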
import binascii from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data def get_sensor_status_init(parameter=''): return (' 01\n', '') def get_sensor_status_uninit(parameter=''): return (' 00\n', '') def init_sensor_agent(parameter=''): return (' 00\n', '') def get_nm_version_v2(parameter=''): return test_data.nm_version_v2 def get_nm_version_v3(parameter=''): return test_data.nm_version_v3 def sdr_dump(data_file=''): if data_file == '': raise ValueError("No file specified for ipmitool sdr dump") fake_slave_address = '2c' fake_channel = '60' hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel data = binascii.unhexlify(hexstr) with open(data_file, 'wb') as bin_fp: bin_fp.write(data) return ('', '') def _execute(funcs, *cmd, **kwargs): datas = { test_data.device_id_cmd: test_data.device_id, test_data.nm_device_id_cmd: test_data.nm_device_id, test_data.get_power_cmd: test_data.power_data, test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, test_data.get_airflow_cmd: test_data.airflow_data, test_data.get_cups_index_cmd: test_data.cups_index_data, test_data.get_cups_util_cmd: test_data.cups_util_data, test_data.sdr_info_cmd: test_data.sdr_info, test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, test_data.read_sensor_current_cmd: test_data.sensor_current, test_data.read_sensor_fan_cmd: test_data.sensor_fan, } if cmd[1] == 'sdr' and cmd[2] == 'dump': # ipmitool sdr dump /tmp/XXXX cmd_str = "".join(cmd[:3]) par_str = cmd[3] else: cmd_str = "".join(cmd) par_str = '' try: return datas[cmd_str] except KeyError: return funcs[cmd_str](par_str) def execute_with_nm_v3(*cmd, **kwargs): """test version of execute on Node Manager V3.0 platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_init, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump, test_data.nm_version_cmd: get_nm_version_v3} return _execute(funcs, *cmd, **kwargs) def execute_with_nm_v2(*cmd, **kwargs): """test version of execute on Node Manager V2.0 platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_init, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump, test_data.nm_version_cmd: get_nm_version_v2} return _execute(funcs, *cmd, **kwargs) def execute_without_nm(*cmd, **kwargs): """test version of execute on Non-Node Manager platform.""" funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, test_data.init_sensor_cmd: init_sensor_agent, test_data.sdr_dump_cmd: sdr_dump} return _execute(funcs, *cmd, **kwargs) def execute_without_ipmi(*cmd, **kwargs): raise nmexcept.IPMIException ceilometer-10.0.0/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py0000666000175100017510000001555013236733243030240 0ustar zuulzuul00000000000000# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc import tempfile import mock from oslotest import base import six from ceilometer.ipmi.platform import intel_node_manager as node_manager from ceilometer import service from ceilometer.tests.unit.ipmi.platform import fake_utils from ceilometer import utils @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractmethod def init_test_engine(self): """Prepare specific ipmitool as engine for different NM version.""" def setUp(self): super(_Base, self).setUp() conf = service.prepare_service([], []) self.init_test_engine() with mock.patch.object(node_manager.NodeManager, '__new__', side_effect=self._new_no_singleton): self.nm = node_manager.NodeManager(conf) @staticmethod def _new_no_singleton(cls, *args, **kwargs): if six.PY3: # We call init manually due to a py3 bug: # https://bugs.python.org/issue25731 obj = super(node_manager.NodeManager, cls).__new__(cls) obj.__init__(*args, **kwargs) return obj else: return super(node_manager.NodeManager, cls).__new__( cls, *args, **kwargs) class TestNodeManagerV3(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) def test_read_airflow(self): airflow = self.nm.read_airflow() avg_val = node_manager._hex(airflow["Average_value"]) max_val = node_manager._hex(airflow["Maximum_value"]) min_val = node_manager._hex(airflow["Minimum_value"]) cur_val = node_manager._hex(airflow["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(190, cur_val) self.assertEqual(150, min_val) self.assertEqual(550, max_val) self.assertEqual(203, avg_val) def test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() avg_val = node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # get NM 3.0 self.assertEqual(5, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(25, cur_val) self.assertEqual(24, min_val) self.assertEqual(27, max_val) self.assertEqual(25, avg_val) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) mem_util = node_manager._hex(cups_util["Mem_Utilization"]) io_util = node_manager._hex(cups_util["IO_Utilization"]) # see ipmi_test_data.py for raw data self.assertEqual(51, cpu_util) self.assertEqual(5, mem_util) self.assertEqual(0, io_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() index = node_manager._hex(cups_index["CUPS_Index"]) self.assertEqual(46, index) class TestNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) def test_read_power_all(self): power = self.nm.read_power_all() avg_val = node_manager._hex(power["Average_value"]) max_val = node_manager._hex(power["Maximum_value"]) min_val = node_manager._hex(power["Minimum_value"]) cur_val = node_manager._hex(power["Current_value"]) # get NM 2.0 self.assertEqual(3, self.nm.nm_version) # see ipmi_test_data.py for raw data self.assertEqual(87, cur_val) self.assertEqual(3, min_val) self.assertEqual(567, max_val) self.assertEqual(92, avg_val) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() avg_val = 
node_manager._hex(temperature["Average_value"]) max_val = node_manager._hex(temperature["Maximum_value"]) min_val = node_manager._hex(temperature["Minimum_value"]) cur_val = node_manager._hex(temperature["Current_value"]) # see ipmi_test_data.py for raw data self.assertEqual(23, cur_val) self.assertEqual(22, min_val) self.assertEqual(24, max_val) self.assertEqual(23, avg_val) def test_read_airflow(self): airflow = self.nm.read_airflow() self.assertEqual({}, airflow) def test_read_outlet_temperature(self): temperature = self.nm.read_outlet_temperature() self.assertEqual({}, temperature) def test_read_cups_utilization(self): cups_util = self.nm.read_cups_utilization() self.assertEqual({}, cups_util) def test_read_cups_index(self): cups_index = self.nm.read_cups_index() self.assertEqual({}, cups_index) class TestNonNodeManager(_Base): def init_test_engine(self): utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) def test_read_power_all(self): # no NM support self.assertEqual(0, self.nm.nm_version) power = self.nm.read_power_all() # Non-Node Manager platform return empty data self.assertEqual({}, power) def test_read_inlet_temperature(self): temperature = self.nm.read_inlet_temperature() # Non-Node Manager platform return empty data self.assertEqual({}, temperature) class ParseSDRFileTestCase(base.BaseTestCase): def setUp(self): super(ParseSDRFileTestCase, self).setUp() self.temp_file = tempfile.NamedTemporaryFile().name def test_parsing_found(self): data = b'\x00\xFF\x00\xFF\x57\x01\x00\x0D\x01\x0A\xB2\x00\xFF' with open(self.temp_file, 'wb') as f: f.write(data) result = node_manager.NodeManager._parse_slave_and_channel( self.temp_file) self.assertEqual(('0a', 'b'), result) def test_parsing_not_found(self): data = b'\x00\xFF\x00\xFF\x52\x01\x80\x0D\x01\x6A\xB7\x00\xFF' with open(self.temp_file, 'wb') as f: f.write(data) result = node_manager.NodeManager._parse_slave_and_channel( self.temp_file) self.assertIsNone(result) ceilometer-10.0.0/ceilometer/tests/unit/test_notification.py0000666000175100017510000005026413236733243024333 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
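# NOTE: the tests below drive the notification agent end to end.  They write
# a minimal pipeline definition to a temporary file and point the service at
# it; the YAML they generate is roughly equivalent to (illustrative sketch
# mirroring setup_pipeline()/setup_event_pipeline() further down):
#
#     sources:
#       - name: test_pipeline
#         interval: 5
#         meters: ['vcpus', 'memory']
#         sinks: ['test_sink']
#     sinks:
#       - name: test_sink
#         transformers: []
#         publishers: ['test://']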
"""Tests for Ceilometer notify daemon.""" import time import mock import oslo_messaging from oslo_utils import fileutils import six import yaml from ceilometer import messaging from ceilometer import notification from ceilometer.publisher import test as test_publisher from ceilometer import service from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } class BaseNotificationTest(tests_base.BaseTestCase): def run_service(self, srv): srv.run() self.addCleanup(srv.terminate) if srv.conf.notification.workload_partitioning: start = time.time() while time.time() - start < 10: if srv.group_state and srv.pipeline_listener: break # ensure pipeline is set if HA time.sleep(0.1) else: self.fail('Did not start pipeline queues') class TestNotification(BaseNotificationTest): def setUp(self): super(TestNotification, self).setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) self.srv = notification.NotificationService(0, self.CONF) def test_targets(self): self.assertEqual(15, len(self.srv.get_targets())) def test_start_multiple_listeners(self): urls = ["fake://vhost1", "fake://vhost2"] self.CONF.set_override("messaging_urls", urls, group="notification") self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(2, len(self.srv.listeners)) @mock.patch('oslo_messaging.get_batch_notification_listener') def test_unique_consumers(self, mock_listener): self.CONF.set_override('notification_control_exchanges', ['dup'] * 2, group='notification') self.run_service(self.srv) # 1 target, 1 listener self.assertEqual(1, len(mock_listener.call_args_list[0][0][1])) self.assertEqual(1, len(self.srv.listeners)) def test_select_pipelines(self): self.CONF.set_override('pipelines', ['event'], group='notification') self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(1, len(self.srv.managers)) self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) @mock.patch('ceilometer.notification.LOG') def test_select_pipelines_missing(self, logger): self.CONF.set_override('pipelines', 
['meter', 'event', 'bad'], group='notification') self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(2, len(self.srv.managers)) logger.error.assert_called_with( 'Could not load the following pipelines: %s', set(['bad'])) class BaseRealNotification(BaseNotificationTest): def setup_pipeline(self, counter_names): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': counter_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setup_event_pipeline(self, event_names): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': event_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': ['test://'] }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") return ev_pipeline_cfg_file def setUp(self): super(BaseRealNotification, self).setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['vcpus', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.expected_samples = 2 ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) self.expected_events = 1 self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.publisher = test_publisher.TestPublisher(self.CONF, "") def _check_notification_service(self): self.run_service(self.srv) notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") notifier.info({}, 'compute.instance.create.end', TEST_NOTICE_PAYLOAD) start = time.time() while time.time() - start < 60: if (len(self.publisher.samples) >= self.expected_samples and len(self.publisher.events) >= self.expected_events): break resources = list(set(s.resource_id for s in self.publisher.samples)) self.assertEqual(self.expected_samples, len(self.publisher.samples)) self.assertEqual(self.expected_events, len(self.publisher.events)) self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) class TestRealNotification(BaseRealNotification): def setUp(self): super(TestRealNotification, self).setUp() self.srv = notification.NotificationService(0, self.CONF) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service_error_topic(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.run_service(self.srv) notifier = messaging.get_notifier(self.transport, 'compute.vagrant-precise') notifier.error({}, 'compute.instance.error', TEST_NOTICE_PAYLOAD) start = time.time() while time.time() - start < 60: if len(self.publisher.events) >= self.expected_events: break self.assertEqual(self.expected_events, len(self.publisher.events)) class TestRealNotificationHA(BaseRealNotification): def setUp(self): super(TestRealNotificationHA, self).setUp() self.CONF.set_override('workload_partitioning', True, group='notification') self.CONF.set_override("backend_url", "zake://", group="coordination") self.srv = notification.NotificationService(0, self.CONF) 
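    # NOTE: the HA variants enable workload partitioning with the in-memory
    # 'zake://' coordination backend.  Conceptually the agent joins a hash
    # ring and only consumes the internal IPC queues it owns, roughly
    # (illustrative sketch; the group name here is hypothetical):
    #
    #     hashring = coordinator.join_partitioned_group('ceilometer.notification')
    #     if hashring.belongs_to_self(queue):  # e.g. 'ceilometer_ipc-sample-4'
    #         listen(queue)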
    @mock.patch('ceilometer.publisher.test.TestPublisher')
    def test_notification_service(self, fake_publisher_cls):
        fake_publisher_cls.return_value = self.publisher
        self._check_notification_service()

    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'stop')
    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'wait')
    @mock.patch.object(oslo_messaging.MessageHandlingServer, 'start')
    def test_notification_threads(self, m_listener, m_wait, m_stop):
        self.CONF.set_override('batch_size', 1, group='notification')
        self.srv.run()
        m_listener.assert_called_with(
            override_pool_size=self.CONF.max_parallel_requests)
        m_listener.reset_mock()
        self.CONF.set_override('batch_size', 2, group='notification')
        self.srv._refresh_agent()
        m_listener.assert_called_with(override_pool_size=1)

    @mock.patch('oslo_messaging.get_batch_notification_listener')
    def test_reset_listener_on_refresh(self, mock_listener):
        mock_listener.side_effect = [
            mock.MagicMock(),  # main listener
            mock.MagicMock(),  # pipeline listener
            mock.MagicMock(),  # refresh pipeline listener
        ]
        self.run_service(self.srv)
        listener = self.srv.pipeline_listener
        self.srv._refresh_agent()
        self.assertIsNot(listener, self.srv.pipeline_listener)

    def test_hashring_targets(self):
        maybe = {"maybe": 0}

        def _once_over_five(item):
            maybe["maybe"] += 1
            return maybe["maybe"] % 5 == 0

        hashring = mock.MagicMock()
        hashring.belongs_to_self = _once_over_five
        self.srv.partition_coordinator = pc = mock.MagicMock()
        pc.join_partitioned_group.return_value = hashring
        self.run_service(self.srv)
        topics = [target.topic
                  for target in self.srv.pipeline_listener.targets]
        self.assertEqual(4, len(topics))
        self.assertEqual(
            {'ceilometer_ipc-sample-4', 'ceilometer_ipc-sample-9',
             'ceilometer_ipc-event-4', 'ceilometer_ipc-event-9'},
            set(topics))

    @mock.patch('oslo_messaging.get_batch_notification_listener')
    def test_notify_to_relevant_endpoint(self, mock_listener):
        self.run_service(self.srv)

        targets = mock_listener.call_args[0][1]
        self.assertIsNotEmpty(targets)

        pipe_list = []
        for mgr in self.srv.managers:
            for pipe in mgr.pipelines:
                pipe_list.append(pipe.name)

        for pipe in pipe_list:
            for endpoint in mock_listener.call_args[0][2]:
                self.assertTrue(hasattr(endpoint, 'filter_rule'))
                if endpoint.filter_rule.match(None, None, pipe, None, None):
                    break
            else:
                self.fail('%s not handled by any endpoint' % pipe)

    @mock.patch('oslo_messaging.Notifier.sample')
    def test_broadcast_to_relevant_pipes_only(self, mock_notifier):
        self.run_service(self.srv)
        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
            if (hasattr(endpoint, 'filter_rule') and not
                    endpoint.filter_rule.match(None, None, 'nonmatching.end',
                                               None, None)):
                continue
            endpoint.info([{
                'ctxt': TEST_NOTICE_CTXT,
                'publisher_id': 'compute.vagrant-precise',
                'event_type': 'nonmatching.end',
                'payload': TEST_NOTICE_PAYLOAD,
                'metadata': TEST_NOTICE_METADATA}])
        self.assertFalse(mock_notifier.called)
        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
            if (hasattr(endpoint, 'filter_rule') and not
                    endpoint.filter_rule.match(None, None,
                                               'compute.instance.create.end',
                                               None, None)):
                continue
            endpoint.info([{
                'ctxt': TEST_NOTICE_CTXT,
                'publisher_id': 'compute.vagrant-precise',
                'event_type': 'compute.instance.create.end',
                'payload': TEST_NOTICE_PAYLOAD,
                'metadata': TEST_NOTICE_METADATA}])
        self.assertTrue(mock_notifier.called)
        self.assertEqual(3, mock_notifier.call_count)
        self.assertEqual(1, len([i for i in mock_notifier.call_args_list if
                                 'event_type' in i[1]['payload'][0]]))
        self.assertEqual(2, len([i for i in mock_notifier.call_args_list if
'counter_name' in i[1]['payload'][0]])) class TestRealNotificationMultipleAgents(BaseNotificationTest): def setup_pipeline(self, transformers): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 'interval': 5, 'meters': ['vcpus', 'memory'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'transformers': transformers, 'publishers': ['test://'] }] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setup_event_pipeline(self): pipeline = yaml.dump({ 'sources': [], 'sinks': [] }) if six.PY3: pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile( content=pipeline, prefix="event_pipeline", suffix="yaml") return pipeline_cfg_file def setUp(self): super(TestRealNotificationMultipleAgents, self).setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['instance', 'memory']) event_pipeline_cfg_file = self.setup_event_pipeline() self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.CONF.set_override("event_pipeline_cfg_file", event_pipeline_cfg_file) self.CONF.set_override("backend_url", "zake://", group="coordination") self.CONF.set_override('workload_partitioning', True, group='notification') self.CONF.set_override('pipeline_processing_queues', 2, group='notification') self.CONF.set_override('check_watchers', 1, group='coordination') self.publisher = test_publisher.TestPublisher(self.CONF, "") self.publisher2 = test_publisher.TestPublisher(self.CONF, "") def _check_notifications(self, fake_publisher_cls): fake_publisher_cls.side_effect = [self.publisher, self.publisher2] maybe = {"srv": 0, "srv2": -1} def _sometimes_srv(item): maybe["srv"] += 1 return (maybe["srv"] % 2) == 0 self.srv = notification.NotificationService(0, self.CONF) self.srv.partition_coordinator = pc = mock.MagicMock() hashring_srv1 = mock.MagicMock() hashring_srv1.belongs_to_self = _sometimes_srv hashring_srv1.ring.nodes = {'id1': mock.Mock()} pc.join_partitioned_group.return_value = hashring_srv1 self.run_service(self.srv) def _sometimes_srv2(item): maybe["srv2"] += 1 return (maybe["srv2"] % 2) == 0 self.srv2 = notification.NotificationService(0, self.CONF) self.srv2.partition_coordinator = pc = mock.MagicMock() hashring = mock.MagicMock() hashring.belongs_to_self = _sometimes_srv2 hashring.ring.nodes = {'id1': mock.Mock(), 'id2': mock.Mock()} self.srv.hashring.ring.nodes = hashring.ring.nodes.copy() pc.join_partitioned_group.return_value = hashring self.run_service(self.srv2) notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") payload1 = TEST_NOTICE_PAYLOAD.copy() payload1['instance_id'] = '0' notifier.info({}, 'compute.instance.create.end', payload1) payload2 = TEST_NOTICE_PAYLOAD.copy() payload2['instance_id'] = '1' notifier.info({}, 'compute.instance.create.end', payload2) self.expected_samples = 4 with mock.patch('six.moves.builtins.hash', lambda x: int(x)): start = time.time() while time.time() - start < 10: if (len(self.publisher.samples + self.publisher2.samples) >= self.expected_samples and len(self.srv.group_state) == 2): break time.sleep(0.1) self.assertEqual(2, len(self.publisher.samples)) self.assertEqual(2, len(self.publisher2.samples)) self.assertEqual(1, len(set( s.resource_id for s in self.publisher.samples))) self.assertEqual(1, len(set( s.resource_id for s in self.publisher2.samples))) self.assertEqual(2, 
len(self.srv.group_state)) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_no_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline([]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) @mock.patch('ceilometer.publisher.test.TestPublisher') def test_multiple_agents_multiple_transform(self, fake_publisher_cls): pipeline_cfg_file = self.setup_pipeline( [{ 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }, { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }]) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self._check_notifications(fake_publisher_cls) ceilometer-10.0.0/ceilometer/tests/unit/test_bin.py0000666000175100017510000001202413236733243022405 0ustar zuulzuul00000000000000# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
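# NOTE: these tests exercise the installed console scripts by spawning them
# in a subprocess against a throwaway configuration file.  The generated
# config is essentially (illustrative sketch of the content built below):
#
#     [DEFAULT]
#     transport_url = fake://
#
# after which a binary is invoked along the lines of:
#
#     ceilometer-upgrade --skip-gnocchi-resource-types --config-file=<tmpfile>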
import os import subprocess import time from oslo_utils import fileutils import six from ceilometer.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super(BinTestCase, self).setUp() content = ("[DEFAULT]\n" "transport_url = fake://\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super(BinTestCase, self).tearDown() os.remove(self.tempfile) def test_upgrade_run(self): subp = subprocess.Popen(['ceilometer-upgrade', '--skip-gnocchi-resource-types', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) class BinSendSampleTestCase(base.BaseTestCase): def setUp(self): super(BinSendSampleTestCase, self).setUp() pipeline_cfg_file = self.path_get( 'ceilometer/pipeline/data/pipeline.yaml') content = ("[DEFAULT]\n" "transport_url = fake://\n" "pipeline_cfg_file={0}\n".format(pipeline_cfg_file)) if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super(BinSendSampleTestCase, self).tearDown() os.remove(self.tempfile) def test_send_counter_run(self): subp = subprocess.Popen(['ceilometer-send-sample', "--config-file=%s" % self.tempfile, "--sample-resource=someuuid", "--sample-name=mycounter"]) self.assertEqual(0, subp.wait()) class BinCeilometerPollingServiceTestCase(base.BaseTestCase): def setUp(self): super(BinCeilometerPollingServiceTestCase, self).setUp() self.tempfile = None self.subp = None def tearDown(self): if self.subp: try: self.subp.kill() except OSError: pass os.remove(self.tempfile) super(BinCeilometerPollingServiceTestCase, self).tearDown() def test_starting_with_duplication_namespaces(self): content = ("[DEFAULT]\n" "transport_url = fake://\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') self.subp = subprocess.Popen(['ceilometer-polling', "--config-file=%s" % self.tempfile, "--polling-namespaces", "compute", "compute"], stderr=subprocess.PIPE) expected = (b'Duplicated values: [\'compute\', \'compute\'] ' b'found in CLI options, auto de-duplicated') # NOTE(gordc): polling process won't quit so wait for a bit and check start = time.time() while time.time() - start < 5: output = self.subp.stderr.readline() if expected in output: break else: self.fail('Did not detect expected warning: %s' % expected) def test_polling_namespaces_invalid_value_in_config(self): content = ("[DEFAULT]\n" "transport_url = fake://\n" "polling_namespaces = ['central']\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') self.subp = subprocess.Popen( ["ceilometer-polling", "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) __, err = self.subp.communicate() self.assertIn(b"Exception: Valid values are ['compute', 'central', " b"'ipmi'], but found [\"['central']\"]", err) ceilometer-10.0.0/ceilometer/tests/unit/meter/0000775000175100017510000000000013236733440021336 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/meter/test_meter_plugins.py0000666000175100017510000000617313236733243025636 0ustar zuulzuul00000000000000# # Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from ceilometer.event import trait_plugins class TestTimedeltaPlugin(base.BaseTestCase): def setUp(self): super(TestTimedeltaPlugin, self).setUp() self.plugin = trait_plugins.TimedeltaPlugin() def test_timedelta_transformation(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32')] value = self.plugin.trait_value(match_list) self.assertEqual(3600, value) def test_timedelta_missing_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_exceed_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32'), ('test.timestamp3', '2016-03-02T16:10:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_invalid_timestamp(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T15:004:32')] with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: self.assertIsNone(self.plugin.trait_value(match_list)) msg = log.warning._mock_call_args[0][0] self.assertTrue(msg.startswith('Failed to parse date from set ' 'fields, both fields ') ) def test_timedelta_reverse_timestamp_order(self): match_list = [('test.timestamp1', '2016-03-02T15:15:32'), ('test.timestamp2', '2016-03-02T15:10:32')] value = self.plugin.trait_value(match_list) self.assertEqual(300, value) def test_timedelta_precise_difference(self): match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'), ('test.timestamp2', '2016-03-02T15:10:32.786899')] value = self.plugin.trait_value(match_list) self.assertEqual(0.000006, value) ceilometer-10.0.0/ceilometer/tests/unit/meter/test_notifications.py0000666000175100017510000011751413236733243025634 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
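# NOTE: meters are loaded from declarative YAML definitions; each test below
# renders a small meters.yaml into a temporary directory and reloads the
# handler.  A minimal definition looks roughly like this (illustrative
# sketch matching the fixtures used below):
#
#     metric:
#       - name: test1
#         event_type: test.create
#         type: delta
#         unit: B
#         volume: $.payload.volume
#         resource_id: $.payload.resource_id
#         project_id: $.payload.project_id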
"""Tests for ceilometer.meter.notifications """ import copy import fixtures import mock import six import yaml from oslo_utils import encodeutils from oslo_utils import fileutils from ceilometer import declarative from ceilometer.meter import notifications from ceilometer import service as ceilometer_service from ceilometer.tests import base as test NOTIFICATION = { 'event_type': u'test.create', 'metadata': {'timestamp': u'2015-06-19T09:19:35.786893', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', u'timestamp': u'2015-06-19T09:19:35.785330', u'created_at': u'2015-06-19T09:25:35.785330', u'launched_at': u'2015-06-19T09:25:40.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', }, 'ctxt': {u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', u'user': u'e1d870e51c7340cb9d555b15cbfcaec2'}, 'publisher_id': "foo123" } USER_META = { 'event_type': u'test.create', 'metadata': {'timestamp': u'2015-06-19T09:19:35.786893', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', u'timestamp': u'2015-06-19T09:19:35.785330', u'created_at': u'2015-06-19T09:25:35.785330', u'launched_at': u'2015-06-19T09:25:40.785330', u'message_signature': u'fake_signature1', u'resource_metadata': {u'foo': u'bar'}, u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', u'volume': 1.0, u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', u'metadata': {u'metering.xyz': u'abc', u'ignore': u'this'}, }, 'ctxt': {u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', u'user': u'e1d870e51c7340cb9d555b15cbfcaec2'}, 'publisher_id': "foo123" } MIDDLEWARE_EVENT = { u'ctxt': {u'request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'quota_class': None, u'service_catalog': [], u'auth_token': None, u'user_id': None, u'is_admin': True, u'user': None, u'remote_address': None, u'roles': [], u'timestamp': u'2013-07-29T06:51:34.348091', u'project_name': None, u'read_deleted': u'no', u'tenant': None, u'instance_lock_checked': False, u'project_id': None, u'user_name': None}, u'event_type': u'objectstore.http.request', u'publisher_id': u'ceilometermiddleware', u'metadata': {u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'timestamp': u'2013-07-29T06:51:34.474815+00:00', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2'}, u'payload': { 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', 'eventTime': '2013-07-29T06:51:34.474815+00:00', 'target': { 'action': 'get', 'typeURI': 'service/storage/object', 'id': 'account', 'metadata': { 'path': '/1.0/CUSTOM_account/container/obj', 'version': '1.0', 'container': 'container', 'object': 'obj' } }, 'observer': { 'id': 'target' }, 'eventType': 'activity', 'measurements': [ { 'metric': { 'metricId': 'openstack: uuid', 'name': 'storage.objects.outgoing.bytes', 'unit': 'B' }, 'result': 28 }, { 'metric': { 'metricId': 'openstack: uuid2', 'name': 'storage.objects.incoming.bytes', 'unit': 'B' }, 'result': 1 } ], 'initiator': { 'typeURI': 'service/security/account/user', 'project_id': None, 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' }, 'action': 'read', 'outcome': 'success', 
'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' } } FULL_MULTI_MSG = { 'event_type': u'full.sample', 'payload': [{ u'counter_name': u'instance1', u'user_id': u'user1', u'resource_id': u'res1', u'counter_unit': u'ns', u'counter_volume': 28.0, u'project_id': u'proj1', u'counter_type': u'gauge' }, { u'counter_name': u'instance2', u'user_id': u'user2', u'resource_id': u'res2', u'counter_unit': u'%', u'counter_volume': 1.0, u'project_id': u'proj2', u'counter_type': u'delta' }], u'ctxt': {u'domain': None, u'request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', u'auth_token': None, u'read_only': False, u'resource_uuid': None, u'user_identity': u'fake_user_identity---', u'show_deleted': False, u'tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', u'is_admin': True, u'project_domain': None, u'user': u'e1d870e51c7340cb9d555b15cbfcaec2', u'user_domain': None}, 'publisher_id': u'ceilometer.api', 'metadata': {'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', 'timestamp': u'2015-06-19T09:19:35.786893'}, } METRICS_UPDATE = { u'event_type': u'compute.metrics.update', u'payload': { u'metrics': [ {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.frequency', 'value': 1600, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.time', 'value': 17421440000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.time', 'value': 7852600000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.time', 'value': 1307374400000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.time', 'value': 11697470000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.user.percent', 'value': 0.012959045637294348, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': u'2013-07-29T06:51:34.472416', 'name': 'cpu.percent', 'value': 0.027501485834103515, 'source': 'libvirt.LibvirtDriver'}], u'nodename': u'tianst.sh.intel.com', u'host': u'tianst', u'host_id': u'10.0.1.1'}, u'publisher_id': u'compute.tianst.sh.intel.com', u'metadata': {u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', u'timestamp': u'2013-07-29 06:51:34.474815', u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2'}, u'ctxt': {u'request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', u'quota_class': None, u'service_catalog': [], u'auth_token': None, u'user_id': None, u'is_admin': True, u'user': None, u'remote_address': None, u'roles': [], u'timestamp': u'2013-07-29T06:51:34.348091', u'project_name': None, u'read_deleted': u'no', u'tenant': None, u'instance_lock_checked': False, u'project_id': None, u'user_name': None} } class TestMeterDefinition(test.BaseTestCase): def test_config_definition(self): cfg = dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id") handler = notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock()) self.assertTrue(handler.match_type("test.create")) sample = 
list(handler.to_samples(NOTIFICATION))[0] self.assertEqual(1.0, sample["volume"]) self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", sample["resource_id"]) self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", sample["project_id"]) def test_config_required_missing_fields(self): cfg = dict() try: notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock()) except declarative.DefinitionException as e: self.assertIn("Required fields ['name', 'type', 'event_type'," " 'unit', 'volume', 'resource_id']" " not specified", encodeutils.exception_to_unicode(e)) def test_bad_type_cfg_definition(self): cfg = dict(name="test", type="foo", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23") try: notifications.MeterDefinition(cfg, mock.Mock(), mock.Mock()) except declarative.DefinitionException as e: self.assertIn("Invalid type foo specified", encodeutils.exception_to_unicode(e)) class TestMeterProcessing(test.BaseTestCase): def setUp(self): super(TestMeterProcessing, self).setUp() self.CONF = ceilometer_service.prepare_service([], []) self.path = self.useFixture(fixtures.TempDir()).path self.handler = notifications.ProcessMeterNotifications( self.CONF, mock.Mock()) def _load_meter_def_file(self, cfgs=None): self.CONF.set_override('meter_definitions_dirs', [self.path], group='meter') cfgs = cfgs or [] if not isinstance(cfgs, list): cfgs = [cfgs] meter_cfg_files = list() for cfg in cfgs: if six.PY3: cfg = cfg.encode('utf-8') meter_cfg_files.append(fileutils.write_to_tempfile(content=cfg, path=self.path, prefix="meters", suffix=".yaml")) self.handler.definitions = self.handler._load_definitions() @mock.patch('ceilometer.meter.notifications.LOG') def test_bad_meter_definition_skip(self, LOG): cfg = yaml.dump( {'metric': [dict(name="good_test_1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test_2", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23"), dict(name="good_test_3", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) self.assertEqual(2, len(self.handler.definitions)) args, kwargs = LOG.error.call_args_list[0] self.assertEqual("Error loading meter definition: %s", args[0]) self.assertTrue(args[1].endswith("Invalid type bad_type specified")) def test_jsonpath_values_parsed(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('test1', s1['name']) self.assertEqual(1.0, s1['volume']) self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) def test_multiple_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) data = 
list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) def test_unmatched_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(0, len(c)) def test_regex_match_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) def test_default_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['metadata']['timestamp'], s1['timestamp']) def test_custom_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name", timestamp='$.payload.eventTime')]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], s1['timestamp']) def test_custom_timestamp_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename", timestamp="$.payload.metrics" "[?(@.name='cpu.frequency')].timestamp")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual("2013-07-29T06:51:34.472416+00:00", s1['timestamp']) def test_default_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = NOTIFICATION['payload'].copy() meta['host'] = NOTIFICATION['publisher_id'] meta['event_type'] = NOTIFICATION['event_type'] self.assertEqual(meta, s1['resource_metadata']) def test_datetime_plugin(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="gauge", unit="sec", volume={"fields": ["$.payload.created_at", "$.payload.launched_at"], "plugin": "timedelta"}, 
resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(5.0, s1['volume']) def test_custom_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", metadata={'proj': '$.payload.project_id', 'dict': '$.payload.resource_metadata'})]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'proj': s1['project_id'], 'dict': NOTIFICATION['payload']['resource_metadata']} self.assertEqual(meta, s1['resource_metadata']) def test_user_meta(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", user_metadata="$.payload.metadata",)]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(USER_META)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'user_metadata': {'xyz': 'abc'}} self.assertEqual(meta, s1['resource_metadata']) def test_user_meta_and_custom(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", user_metadata="$.payload.metadata", metadata={'proj': '$.payload.project_id'})]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(USER_META)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'user_metadata': {'xyz': 'abc'}, 'proj': s1['project_id']} self.assertEqual(meta, s1['resource_metadata']) def test_multi_match_event_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(c)) def test_multi_meter_payload(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "volume", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(MIDDLEWARE_EVENT)) self.assertEqual(2, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) s2 = c[1].as_dict() self.assertEqual('storage.objects.incoming.bytes', s2['name']) self.assertEqual(1, s2['volume']) self.assertEqual('B', s2['unit']) def test_multi_meter_payload_single(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", 
project_id="$.payload.initiator.project_id", lookup=["name", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) def test_multi_meter_payload_none(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) def test_multi_meter_payload_all_multi(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.[*].counter_name", event_type="full.sample", type="$.payload.[*].counter_type", unit="$.payload.[*].counter_unit", volume="$.payload.[*].counter_volume", resource_id="$.payload.[*].resource_id", project_id="$.payload.[*].project_id", user_id="$.payload.[*].user_id", lookup=['name', 'type', 'unit', 'volume', 'resource_id', 'project_id', 'user_id'])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(FULL_MULTI_MSG)) self.assertEqual(2, len(c)) msg = FULL_MULTI_MSG['payload'] for idx, val in enumerate(c): s1 = val.as_dict() self.assertEqual(msg[idx]['counter_name'], s1['name']) self.assertEqual(msg[idx]['counter_volume'], s1['volume']) self.assertEqual(msg[idx]['counter_unit'], s1['unit']) self.assertEqual(msg[idx]['counter_type'], s1['type']) self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) self.assertEqual(msg[idx]['project_id'], s1['project_id']) self.assertEqual(msg[idx]['user_id'], s1['user_id']) @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_missing(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] del event['payload']['measurements'][1]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 0 fetched meters contain ' '"volume" field instead of 2.') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_short(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 1 fetched meters contain ' '"volume" field instead of 2.') def test_arithmetic_expr_meter(self): cfg = yaml.dump( {'metric': 
[dict(name='compute.node.cpu.percent', event_type="compute.metrics.update", type='gauge', unit="percent", volume="$.payload.metrics[" "?(@.name='cpu.percent')].value" " * 100", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.percent', s1['name']) self.assertEqual(2.7501485834103514, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_string_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_prefix_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("prefix-tianst.sh.intel.com", s1['resource_id']) def test_duplicate_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) def test_multi_files_multi_meters(self): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) def test_multi_files_duplicate_meter(self): cfg1 = yaml.dump( {'metric': [dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(data)) self.assertEqual(data[0].as_dict()['name'], 'test') def test_multi_files_empty_payload(self): event = 
copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg1 = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) cfg2 = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(event)) self.assertEqual(0, len(data)) def test_multi_files_unmatched_meter(self): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(data)) self.assertEqual(data[0].as_dict()['name'], 'test1') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_files_bad_meter(self, LOG): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) args, kwargs = LOG.error.call_args_list[0] self.assertEqual("Error loading meter definition: %s", args[0]) self.assertTrue(args[1].endswith("Invalid type bad_type specified")) ceilometer-10.0.0/ceilometer/tests/unit/meter/__init__.py0000666000175100017510000000000013236733243023440 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/pipeline_base.py0000666000175100017510000025212013236733243023400 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
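# NOTE: the base class below stubs out both ends of a sample pipeline: the
# transformer extension manager is replaced by a small in-memory set of
# extensions, and publisher URLs such as 'test://' resolve to
# test_publisher.TestPublisher.  A pipeline under test therefore flows
# roughly as (illustrative sketch):
#
#     sample -> TransformerClass (appends '_update') -> TestPublisher.samples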
import abc import copy import datetime import traceback import unittest import fixtures import mock import monotonic from oslo_utils import timeutils import six from stevedore import extension from ceilometer.pipeline import base as pipe_base from ceilometer.pipeline import sample as pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import sample from ceilometer import service from ceilometer.tests import base from ceilometer import transformer from ceilometer.transformer import accumulator from ceilometer.transformer import arithmetic from ceilometer.transformer import conversions @six.add_metaclass(abc.ABCMeta) class BasePipelineTestCase(base.BaseTestCase): def fake_transform_manager(self): class_name_ext = { 'update': self.TransformerClass, 'except': self.TransformerClassException, 'drop': self.TransformerClassDrop, 'accumulator': accumulator.TransformerAccumulator, 'aggregator': conversions.AggregatorTransformer, 'unit_conversion': conversions.ScalingTransformer, 'rate_of_change': conversions.RateOfChangeTransformer, 'arithmetic': arithmetic.ArithmeticTransformer, 'delta': conversions.DeltaTransformer, } return extension.ExtensionManager.make_test_instance([ extension.Extension(name, None, transformer, None) for name, transformer in class_name_ext.items()]) def get_publisher(self, conf, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](conf, url) class PublisherClassException(publisher.ConfigPublisherBase): def publish_samples(self, samples): raise Exception() def publish_events(self, events): raise Exception() class TransformerClass(transformer.TransformerBase): samples = [] grouping_keys = ['counter_name'] def __init__(self, append_name='_update'): self.__class__.samples = [] self.append_name = append_name @staticmethod def flush(): return [] def handle_sample(self, counter): self.__class__.samples.append(counter) newname = getattr(counter, 'name') + self.append_name return sample.Sample( name=newname, type=counter.type, volume=counter.volume, unit=counter.unit, user_id=counter.user_id, project_id=counter.project_id, resource_id=counter.resource_id, timestamp=counter.timestamp, resource_metadata=counter.resource_metadata, ) class TransformerClassDrop(transformer.TransformerBase): samples = [] grouping_keys = ['resource_id'] def __init__(self): self.__class__.samples = [] def handle_sample(self, counter): self.__class__.samples.append(counter) class TransformerClassException(object): grouping_keys = ['resource_id'] @staticmethod def handle_sample(counter): raise Exception() def setUp(self): super(BasePipelineTestCase, self).setUp() self.CONF = service.prepare_service([], []) self.test_counter = sample.Sample( name='a', type=sample.TYPE_GAUGE, volume=1, unit='B', user_id="test_user", project_id="test_proj", resource_id="test_resource", timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ) self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self.useFixture(fixtures.MockPatchObject( pipeline.SamplePipelineManager, 'get_transform_manager', side_effect=self.fake_transform_manager)) self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(fixtures.MockPatch( 'ceilometer.pipeline.base.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, *args, **kwargs): if 
self._reraise_exception: raise Exception(traceback.format_exc()) @abc.abstractmethod def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" @abc.abstractmethod def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" @abc.abstractmethod def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" @abc.abstractmethod def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" @abc.abstractmethod def _set_pipeline_cfg(self, field, value): """Set a field to a value in the pipeline config.""" @abc.abstractmethod def _extend_pipeline_cfg(self, field, value): """Extend an existing field in the pipeline config with a value.""" @abc.abstractmethod def _unset_pipeline_cfg(self, field): """Clear an existing field in the pipeline config.""" def _build_and_set_new_pipeline(self): name = self.cfg2file(self.pipeline_cfg) self.CONF.set_override('pipeline_cfg_file', name) def _exception_create_pipelinemanager(self): self._build_and_set_new_pipeline() self.assertRaises(pipe_base.PipelineException, pipeline.SamplePipelineManager, self.CONF) def test_no_meters(self): self._unset_pipeline_cfg('meters') self._exception_create_pipelinemanager() def test_no_transformers(self): self._unset_pipeline_cfg('transformers') self._build_and_set_new_pipeline() pipeline.SamplePipelineManager(self.CONF) def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_counters_include_exclude_same(self): counter_cfg = ['a', '!a'] self._set_pipeline_cfg('meters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_include_exclude(self): counter_cfg = ['a', '!b'] self._set_pipeline_cfg('meters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_wildcard_included(self): counter_cfg = ['a', '*'] self._set_pipeline_cfg('meters', counter_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_check_transformer_invalid_transformer(self): transformer_cfg = [ {'name': "test_invalid", 'parameters': {}} ] self._set_pipeline_cfg('transformers', transformer_cfg) self._exception_create_pipelinemanager() def test_publisher_transformer_invoked(self): self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) def test_multiple_included_counters(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, 
resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) @mock.patch('ceilometer.pipeline.sample.LOG') def test_none_volume_counter(self, LOG): self._set_pipeline_cfg('meters', ['empty_volume']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='empty_volume', type=self.test_counter.type, volume=None, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' 'sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) @mock.patch('ceilometer.pipeline.sample.LOG') def test_fake_volume_counter(self, LOG): self._set_pipeline_cfg('meters', ['fake_volume']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='fake_volume', type=self.test_counter.type, volume='fake_value', unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) def test_counter_dont_match(self): counter_cfg = ['nomatch'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(0, publisher.calls) def test_wildcard_counter(self): counter_cfg = ['*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) def test_wildcard_excluded_counters(self): counter_cfg = ['*', '!a'] self._set_pipeline_cfg('meters', counter_cfg) 
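# A meter list of ['*', '!a'] whitelists every meter except 'a', so the
# pipeline source must report that meter 'a' is not supported.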
self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('a')) def test_wildcard_excluded_counters_not_excluded(self): counter_cfg = ['*', '!b'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) def test_all_excluded_counters_not_excluded(self): counter_cfg = ['!b', '!c'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) def test_all_excluded_counters_is_excluded(self): counter_cfg = ['!a', '!c'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('a')) self.assertTrue(pipe.source.support_meter('b')) self.assertFalse(pipe.source.support_meter('c')) def test_wildcard_and_excluded_wildcard_counters(self): counter_cfg = ['*', '!disk.*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('disk.read.bytes')) self.assertTrue(pipe.source.support_meter('cpu')) def test_included_counter_and_wildcard_counters(self): counter_cfg = ['cpu', 'disk.*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertTrue(pipe.source.support_meter('disk.read.bytes')) self.assertTrue(pipe.source.support_meter('cpu')) self.assertFalse(pipe.source.support_meter('instance')) def test_excluded_counter_and_excluded_wildcard_counters(self): counter_cfg = ['!cpu', '!disk.*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('disk.read.bytes')) self.assertFalse(pipe.source.support_meter('cpu')) self.assertTrue(pipe.source.support_meter('instance')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: 
p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b_new', getattr(new_publisher.samples[0], "name")) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) self.assertEqual('b', getattr(self.TransformerClass.samples[1], "name")) def test_multiple_pipeline_exception(self): self._reraise_exception = False self._break_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, publisher.calls) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], "name")) self.assertEqual('b', getattr(self.TransformerClass.samples[1], "name")) def test_none_transformer_pipeline(self): self._set_pipeline_cfg('transformers', None) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], 'name')) def test_empty_transformer_pipeline(self): self._set_pipeline_cfg('transformers', []) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], 'name')) def test_multiple_transformer_same_class(self): transformer_cfg = [ { 'name': 'update', 'parameters': {} }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, publisher.calls) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update_update', getattr(publisher.samples[0], 'name')) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual('a_update', getattr(self.TransformerClass.samples[1], 'name')) def test_multiple_transformer_same_class_different_parameter(self): transformer_cfg = [ { 'name': 'update', 'parameters': { "append_name": "_update", 
} }, { 'name': 'update', 'parameters': { "append_name": "_new", } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual('a_update', getattr(self.TransformerClass.samples[1], 'name')) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) def test_multiple_transformer_drop_transformer(self): transformer_cfg = [ { 'name': 'update', 'parameters': { "append_name": "_update", } }, { 'name': 'drop', 'parameters': {} }, { 'name': 'update', 'parameters': { "append_name": "_new", } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(1, len(self.TransformerClass.samples)) self.assertEqual('a', getattr(self.TransformerClass.samples[0], 'name')) self.assertEqual(1, len(self.TransformerClassDrop.samples)) self.assertEqual('a_update', getattr(self.TransformerClassDrop.samples[0], 'name')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a_update', getattr(new_publisher.samples[0], 'name')) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a_update', getattr(new_publisher.samples[0], 'name')) def test_multiple_counter_pipeline(self): self._set_pipeline_cfg('meters', ['a', 'b']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter, sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, )]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) self.assertEqual('b_update', getattr(publisher.samples[1], 'name')) def test_flush_pipeline_cache(self): CACHE_SIZE = 10 extra_transformer_cfg = [ { 'name': 'accumulator', 'parameters': { 'size': CACHE_SIZE, } }, { 'name': 
'update', 'parameters': { 'append_name': '_new' } }, ] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(self.test_counter) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush() self.assertEqual(0, len(publisher.samples)) pipe.publish_data(self.test_counter) pipe.flush() self.assertEqual(0, len(publisher.samples)) for i in range(CACHE_SIZE - 2): pipe.publish_data(self.test_counter) pipe.flush() self.assertEqual(CACHE_SIZE, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) def test_flush_pipeline_cache_multiple_counter(self): CACHE_SIZE = 3 extra_transformer_cfg = [ { 'name': 'accumulator', 'parameters': { 'size': CACHE_SIZE } }, { 'name': 'update', 'parameters': { 'append_name': '_new' } }, ] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) self._set_pipeline_cfg('meters', ['a', 'b']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter, sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, )]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(CACHE_SIZE, len(publisher.samples)) self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) self.assertEqual('b_update_new', getattr(publisher.samples[1], 'name')) def test_flush_pipeline_cache_before_publisher(self): extra_transformer_cfg = [{ 'name': 'accumulator', 'parameters': {} }] self._extend_pipeline_cfg('transformers', extra_transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] publisher = pipe.publishers[0] pipe.publish_data(self.test_counter) self.assertEqual(0, len(publisher.samples)) pipe.flush() self.assertEqual(1, len(publisher.samples)) self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) def test_global_unit_conversion(self): scale = 'volume / ((10**6) * 60)' transformer_cfg = [ { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': scale}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=1200000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) pipe.flush() self.assertEqual(1, len(publisher.samples)) cpu_mins = publisher.samples[-1] self.assertEqual('cpu_mins', getattr(cpu_mins, 'name')) self.assertEqual('min', getattr(cpu_mins, 'unit')) self.assertEqual(sample.TYPE_CUMULATIVE, 
getattr(cpu_mins, 'type')) self.assertEqual(20, getattr(cpu_mins, 'volume')) # FIXME(sileht): Since the pipeline configuration is loaded from a file # this tests won't pass anymore because of encoding issue. @unittest.skip("fixme: unicode failure") def test_unit_identified_source_unit_conversion(self): transformer_cfg = [ { 'name': 'unit_conversion', 'parameters': { 'source': {'unit': '°C'}, 'target': {'unit': '°F', 'scale': '(volume * 1.8) + 32'}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['core_temperature', 'ambient_temperature']) counters = [ sample.Sample( name='core_temperature', type=sample.TYPE_GAUGE, volume=36.0, unit='°C', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), sample.Sample( name='ambient_temperature', type=sample.TYPE_GAUGE, volume=88.8, unit='°F', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) core_temp = publisher.samples[0] self.assertEqual('core_temperature', getattr(core_temp, 'name')) self.assertEqual('°F', getattr(core_temp, 'unit')) self.assertEqual(96.8, getattr(core_temp, 'volume')) amb_temp = publisher.samples[1] self.assertEqual('ambient_temperature', getattr(amb_temp, 'name')) self.assertEqual('°F', getattr(amb_temp, 'unit')) self.assertEqual(88.8, getattr(amb_temp, 'volume')) self.assertEqual(96.8, getattr(core_temp, 'volume')) def _do_test_rate_of_change_conversion(self, prev, curr, type, expected, offset=1, weight=None): s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)" "* (resource_metadata.non.existent or 1.0)" "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))") transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) now = datetime.datetime.utcnow() later = now + datetime.timedelta(minutes=offset) um = {'autoscaling_weight': weight} if weight else {} counters = [ sample.Sample( name='cpu', type=type, volume=prev, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=prev, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource2', timestamp=now.isoformat(), resource_metadata={'cpu_number': 2, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=curr, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=later.isoformat(), resource_metadata={'cpu_number': 4, 'user_metadata': um}, ), sample.Sample( name='cpu', type=type, volume=curr, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource2', timestamp=later.isoformat(), resource_metadata={'cpu_number': 2, 'user_metadata': um}, ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) publisher = 
pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) pipe.flush() self.assertEqual(2, len(publisher.samples)) cpu_util = publisher.samples[0] self.assertEqual('cpu_util', getattr(cpu_util, 'name')) self.assertEqual('test_resource', getattr(cpu_util, 'resource_id')) self.assertEqual('%', getattr(cpu_util, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) self.assertEqual(expected, getattr(cpu_util, 'volume')) cpu_util = publisher.samples[1] self.assertEqual('cpu_util', getattr(cpu_util, 'name')) self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id')) self.assertEqual('%', getattr(cpu_util, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) self.assertEqual(expected * 2, getattr(cpu_util, 'volume')) def test_rate_of_change_conversion(self): self._do_test_rate_of_change_conversion(120000000000, 180000000000, sample.TYPE_CUMULATIVE, 25.0) def test_rate_of_change_conversion_weight(self): self._do_test_rate_of_change_conversion(120000000000, 180000000000, sample.TYPE_CUMULATIVE, 27.5, weight=1.1) def test_rate_of_change_conversion_negative_cumulative_delta(self): self._do_test_rate_of_change_conversion(180000000000, 120000000000, sample.TYPE_CUMULATIVE, 50.0) def test_rate_of_change_conversion_negative_gauge_delta(self): self._do_test_rate_of_change_conversion(180000000000, 120000000000, sample.TYPE_GAUGE, -25.0) def test_rate_of_change_conversion_zero_delay(self): self._do_test_rate_of_change_conversion(120000000000, 120000000000, sample.TYPE_CUMULATIVE, 0.0, offset=0) def test_rate_of_change_no_predecessor(self): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) now = datetime.datetime.utcnow() counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4} ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush() self.assertEqual(0, len(publisher.samples)) def test_rate_of_change_precision(self): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] now = datetime.datetime.utcnow() now_time = monotonic.monotonic() # Simulate a laggy poller later = now + datetime.timedelta(seconds=12345) later_time = now_time + 10 counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=125000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), monotonic_time=now_time, resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, 
volume=165000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=later.isoformat(), monotonic_time=later_time, resource_metadata={'cpu_number': 4} ), ] pipe.publish_data(counters) publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) cpu_util_sample = publisher.samples[0] self.assertAlmostEqual(100.0, cpu_util_sample.volume) def test_rate_of_change_max(self): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s, 'max': 100} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] now = datetime.datetime.utcnow() later = now + datetime.timedelta(seconds=10) rounding = 12345 counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=125000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=165000000000 + rounding, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=later.isoformat(), resource_metadata={'cpu_number': 4} ), ] pipe.publish_data(counters) publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) cpu_util_sample = publisher.samples[0] self.assertAlmostEqual(100.0, cpu_util_sample.volume) @mock.patch('ceilometer.transformer.conversions.LOG') def test_rate_of_change_out_of_order(self, the_log): s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': {}, 'target': {'name': 'cpu_util', 'unit': '%', 'type': sample.TYPE_GAUGE, 'scale': s} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] now = datetime.datetime.utcnow() earlier = now - datetime.timedelta(seconds=10) later = now + datetime.timedelta(seconds=10) counters = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=125000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=now.isoformat(), resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=120000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=earlier.isoformat(), resource_metadata={'cpu_number': 4} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=130000000000, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=later.isoformat(), resource_metadata={'cpu_number': 4} ), ] pipe.publish_data(counters) publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) pipe.flush() self.assertEqual(1, len(publisher.samples)) cpu_util_sample = publisher.samples[0] self.assertAlmostEqual(12.5, cpu_util_sample.volume) the_log.warning.assert_called_with( 'dropping out of time order sample: %s', (counters[1],) ) def _do_test_rate_of_change_mapping(self, pipe, meters, units): now = datetime.datetime.utcnow() base = 1000 offset = 7 rate = 42 
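# The second sample's volume grows by offset * 60 * rate over a window of
# offset minutes (offset * 60 seconds), so the expected per-second rate for
# each mapped meter is exactly `rate` (42).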
later = now + datetime.timedelta(minutes=offset) counters = [] for v, ts in [(base, now.isoformat()), (base + (offset * 60 * rate), later.isoformat())]: for n, u, r in [(meters[0], units[0], 'resource1'), (meters[1], units[1], 'resource2')]: s = sample.Sample( name=n, type=sample.TYPE_CUMULATIVE, volume=v, unit=u, user_id='test_user', project_id='test_proj', resource_id=r, timestamp=ts, resource_metadata={}, ) counters.append(s) pipe.publish_data(counters) publisher = pipe.publishers[0] self.assertEqual(2, len(publisher.samples)) pipe.flush() self.assertEqual(2, len(publisher.samples)) bps = publisher.samples[0] self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name')) self.assertEqual('resource1', getattr(bps, 'resource_id')) self.assertEqual('%s/s' % units[0], getattr(bps, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type')) self.assertEqual(rate, getattr(bps, 'volume')) rps = publisher.samples[1] self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name')) self.assertEqual('resource2', getattr(rps, 'resource_id')) self.assertEqual('%s/s' % units[1], getattr(rps, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type')) self.assertEqual(rate, getattr(rps, 'volume')) def test_rate_of_change_mapping(self): map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)', 'unit': '(B|request)'} map_to = {'name': 'disk.\\1.\\2.rate', 'unit': '\\1/s'} transformer_cfg = [ { 'name': 'rate_of_change', 'parameters': { 'source': { 'map_from': map_from }, 'target': { 'map_to': map_to, 'type': sample.TYPE_GAUGE }, }, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['disk.read.bytes', 'disk.write.requests']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] meters = ('disk.read.bytes', 'disk.write.requests') units = ('B', 'request') self._do_test_rate_of_change_mapping(pipe, meters, units) def _do_test_aggregator(self, parameters, expected_length): transformer_cfg = [ { 'name': 'aggregator', 'parameters': parameters, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=53, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=42, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=15, unit='B', user_id='test_user', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( 
name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=2, unit='B', user_id='test_user_bis', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '3.0'} ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(expected_length, len(publisher.samples)) return sorted(publisher.samples, key=lambda s: s.volume) def test_aggregator_meter_type(self): volumes = [1.0, 2.0, 3.0] transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': len(volumes) * len(sample.TYPES)} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['testgauge', 'testcumulative', 'testdelta']) counters = [] for sample_type in sample.TYPES: for volume in volumes: counters.append(sample.Sample( name='test' + sample_type, type=sample_type, volume=volume, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} )) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] actual = sorted(s.volume for s in publisher.samples) self.assertEqual([2.0, 3.0, 6.0], actual) def test_aggregator_metadata(self): for conf, expected_version in [('last', '2.0'), ('first', '1.0')]: samples = self._do_test_aggregator({ 'resource_metadata': conf, 'target': {'name': 'aggregated-bytes'} }, expected_length=4) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(2, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(15, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) s = samples[2] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(42, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': expected_version}, s.resource_metadata) s = samples[3] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(95, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': expected_version}, s.resource_metadata) def test_aggregator_user_last_and_metadata_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 'user_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=2) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(44, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(110, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) def test_aggregator_user_first_and_metadata_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 
'user_id': 'first', 'target': {'name': 'aggregated-bytes'} }, expected_length=2) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(44, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) s = samples[1] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(110, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj_bis', s.project_id) self.assertEqual({'version': '2.0'}, s.resource_metadata) def test_aggregator_all_first(self): samples = self._do_test_aggregator({ 'resource_metadata': 'first', 'user_id': 'first', 'project_id': 'first', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '1.0'}, s.resource_metadata) def test_aggregator_all_last(self): samples = self._do_test_aggregator({ 'resource_metadata': 'last', 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) def test_aggregator_all_mixed(self): samples = self._do_test_aggregator({ 'resource_metadata': 'drop', 'user_id': 'first', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({}, s.resource_metadata) def test_aggregator_metadata_default(self): samples = self._do_test_aggregator({ 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) @mock.patch('ceilometer.transformer.conversions.LOG') def test_aggregator_metadata_invalid(self, mylog): samples = self._do_test_aggregator({ 'resource_metadata': 'invalid', 'user_id': 'last', 'project_id': 'last', 'target': {'name': 'aggregated-bytes'} }, expected_length=1) s = samples[0] self.assertTrue(mylog.warning.called) self.assertEqual('aggregated-bytes', s.name) self.assertEqual(154, s.volume) self.assertEqual('test_user_bis', s.user_id) self.assertEqual('test_proj', s.project_id) self.assertEqual({'version': '3.0'}, s.resource_metadata) def test_aggregator_sized_flush(self): transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 2}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', 
timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ) ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data([counters[0]]) pipe.flush() publisher = pipe.publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.publish_data([counters[1]]) pipe.flush() publisher = pipe.publishers[0] self.assertEqual(2, len(publisher.samples)) @mock.patch.object(timeutils, 'utcnow') def test_aggregator_timed_flush(self, mock_utcnow): now = datetime.datetime.utcnow() mock_utcnow.return_value = now transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 900, 'retention_time': 60}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['storage.objects.incoming.bytes']) counters = [ sample.Sample( name='storage.objects.incoming.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(counters) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) mock_utcnow.return_value = now + datetime.timedelta(seconds=120) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) def test_aggregator_without_authentication(self): transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 2}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['storage.objects.outgoing.bytes']) counters = [ sample.Sample( name='storage.objects.outgoing.bytes', type=sample.TYPE_DELTA, volume=26, unit='B', user_id=None, project_id=None, resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='storage.objects.outgoing.bytes', type=sample.TYPE_DELTA, volume=16, unit='B', user_id=None, project_id=None, resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ) ] self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data([counters[0]]) pipe.flush() publisher = pipe.publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.publish_data([counters[1]]) pipe.flush() publisher = pipe.publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(42, getattr(publisher.samples[0], 'volume')) self.assertEqual("test_resource", getattr(publisher.samples[0], 'resource_id')) def test_aggregator_to_rate_of_change_transformer_two_resources(self): resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4', '5dd418a6-c6a9-49c9-9cef-b357d72c71dd'] aggregator = conversions.AggregatorTransformer(size="2", timestamp="last") rate_of_change_transformer = conversions.RateOfChangeTransformer() counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00') for offset in range(2): counter = copy.copy(self.test_counter) counter.timestamp = datetime.datetime.isoformat(counter_time) counter.resource_id = resource_id[0] counter.volume = offset counter.type = sample.TYPE_CUMULATIVE counter.unit = 'ns' aggregator.handle_sample(counter) if offset == 1: test_time = counter_time counter_time = counter_time + 
datetime.timedelta(0, 1) aggregated_counters = aggregator.flush() self.assertEqual(len(aggregated_counters), 1) self.assertEqual(aggregated_counters[0].timestamp, datetime.datetime.isoformat(test_time)) rate_of_change_transformer.handle_sample(aggregated_counters[0]) for offset in range(2): counter = copy.copy(self.test_counter) counter.timestamp = datetime.datetime.isoformat(counter_time) counter.resource_id = resource_id[offset] counter.volume = 2 counter.type = sample.TYPE_CUMULATIVE counter.unit = 'ns' aggregator.handle_sample(counter) if offset == 0: test_time = counter_time counter_time = counter_time + datetime.timedelta(0, 1) aggregated_counters = aggregator.flush() self.assertEqual(len(aggregated_counters), 2) for counter in aggregated_counters: if counter.resource_id == resource_id[0]: rateOfChange = rate_of_change_transformer.handle_sample( counter) self.assertEqual(counter.timestamp, datetime.datetime.isoformat(test_time)) self.assertEqual(rateOfChange.volume, 1) def _do_test_arithmetic_expr_parse(self, expr, expected): actual = arithmetic.ArithmeticTransformer.parse_expr(expr) self.assertEqual(expected, actual) def test_arithmetic_expr_parse(self): expr = '$(cpu) + $(cpu.util)' expected = ('cpu.volume + _cpu_util_ESC.volume', { 'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_parameter(self): expr = '$(cpu) + $(cpu.util).resource_metadata' expected = ('cpu.volume + _cpu_util_ESC.resource_metadata', { 'cpu': 'cpu', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_reserved_keyword(self): expr = '$(class) + $(cpu.util)' expected = ('_class_ESC.volume + _cpu_util_ESC.volume', { 'class': '_class_ESC', 'cpu.util': '_cpu_util_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def test_arithmetic_expr_parse_already_escaped(self): expr = '$(class) + $(_class_ESC)' expected = ('_class_ESC.volume + __class_ESC_ESC.volume', { 'class': '_class_ESC', '_class_ESC': '__class_ESC_ESC' }) self._do_test_arithmetic_expr_parse(expr, expected) def _do_test_arithmetic(self, expression, scenario, expected): transformer_cfg = [ { 'name': 'arithmetic', 'parameters': { 'target': {'name': 'new_meter', 'unit': '%', 'type': sample.TYPE_GAUGE, 'expr': expression}, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', list(set(s['name'] for s in scenario))) counters = [] test_resources = ['test_resource1', 'test_resource2'] for resource_id in test_resources: for s in scenario: counters.append(sample.Sample( name=s['name'], type=sample.TYPE_CUMULATIVE, volume=s['volume'], unit='ns', user_id='test_user', project_id='test_proj', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata=s.get('metadata') )) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] for s in counters: pipe.publish_data(s) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] expected_len = len(test_resources) * len(expected) self.assertEqual(expected_len, len(publisher.samples)) # bucket samples by resource first samples_by_resource = dict((r, []) for r in test_resources) for s in publisher.samples: samples_by_resource[s.resource_id].append(s) for resource_id in samples_by_resource: self.assertEqual(len(expected), len(samples_by_resource[resource_id])) for i, s in enumerate(samples_by_resource[resource_id]): 
self.assertEqual('new_meter', getattr(s, 'name')) self.assertEqual(resource_id, getattr(s, 'resource_id')) self.assertEqual('%', getattr(s, 'unit')) self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type')) self.assertEqual(expected[i], getattr(s, 'volume')) def test_arithmetic_transformer(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [50.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_expr_empty(self): expression = '' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_expr_misconfigured(self): expression = '512.0 * 3' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_nan(self): expression = 'float(\'nan\') * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_exception(self): expression = '$(memory) / 0' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), ] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_multiple_samples(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=2048.0), dict(name='memory.usage', volume=512.0), dict(name='memory', volume=1024.0), ] expected = [25.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_missing(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [dict(name='memory.usage', volume=512.0)] expected = [] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_more_than_needed(self): expression = '100.0 * $(memory.usage) / $(memory)' scenario = [ dict(name='memory', volume=1024.0), dict(name='memory.usage', volume=512.0), dict(name='cpu_util', volume=90.0), ] expected = [50.0] self._do_test_arithmetic(expression, scenario, expected) def test_arithmetic_transformer_cache_cleared(self): transformer_cfg = [ { 'name': 'arithmetic', 'parameters': { 'target': {'name': 'new_meter', 'expr': '$(memory.usage) + 2'} } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['memory.usage']) counter = sample.Sample( name='memory.usage', type=sample.TYPE_GAUGE, volume=1024.0, unit='MB', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata=None ) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data([counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) pipe.flush() self.assertEqual(1, len(publisher.samples)) self.assertEqual(1026.0, publisher.samples[0].volume) pipe.flush() self.assertEqual(1, len(publisher.samples)) counter.volume = 2048.0 pipe.publish_data([counter]) pipe.flush() self.assertEqual(2, len(publisher.samples)) self.assertEqual(2050.0, publisher.samples[1].volume) @mock.patch.object(timeutils, 'utcnow') def 
test_aggregator_timed_flush_no_matching_samples(self, mock_utcnow): now = datetime.datetime.utcnow() mock_utcnow.return_value = now transformer_cfg = [ { 'name': 'aggregator', 'parameters': {'size': 900, 'retention_time': 60}, }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['unrelated-sample']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) mock_utcnow.return_value = now + datetime.timedelta(seconds=200) pipe = pipeline_manager.pipelines[0] pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) def _do_test_delta(self, data, expected, growth_only=False): transformer_cfg = [ { 'name': 'delta', 'parameters': { 'target': {'name': 'new_meter'}, 'growth_only': growth_only, } }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._set_pipeline_cfg('meters', ['cpu']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] pipe.publish_data(data) pipe.flush() publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(expected, len(publisher.samples)) return publisher.samples def test_delta_transformer(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 2) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata) self.assertEqual(-10, deltas[0].volume) self.assertEqual('new_meter', deltas[1].name) self.assertEqual('delta', deltas[1].type) self.assertEqual('ns', deltas[1].unit) self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata) self.assertEqual(37, deltas[1].volume) def test_delta_transformer_out_of_order(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=((timeutils.utcnow() - datetime.timedelta(minutes=5)) .isoformat()), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 1) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) 
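# The out-of-time-order sample (volume 16, timestamped five minutes earlier)
# is dropped, so the single delta produced is 53 - 26 = 27.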
self.assertEqual(27, deltas[0].volume) def test_delta_transformer_growth_only(self): samples = [ sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=26, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=16, unit='ns', user_id='test_user', project_id='test_proj', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '2.0'} ), sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, volume=53, unit='ns', user_id='test_user_bis', project_id='test_proj_bis', resource_id='test_resource', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'version': '1.0'} ), ] deltas = self._do_test_delta(samples, 1, True) self.assertEqual('new_meter', deltas[0].name) self.assertEqual('delta', deltas[0].type) self.assertEqual('ns', deltas[0].unit) self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) self.assertEqual(37, deltas[0].volume) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() def test_get_pipeline_grouping_key(self): transformer_cfg = [ { 'name': 'update', 'parameters': {} }, { 'name': 'unit_conversion', 'parameters': { 'source': {}, 'target': {'name': 'cpu_mins', 'unit': 'min', 'scale': 'volume'}, } }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) self.assertEqual(set(['resource_id', 'counter_name']), set(pipeline_manager.pipelines[0].get_grouping_key())) def test_get_pipeline_duplicate_grouping_key(self): transformer_cfg = [ { 'name': 'update', 'parameters': {} }, { 'name': 'update', 'parameters': {} }, ] self._set_pipeline_cfg('transformers', transformer_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) self.assertEqual(['counter_name'], pipeline_manager.pipelines[0].get_grouping_key()) ceilometer-10.0.0/ceilometer/tests/unit/event/0000775000175100017510000000000013236733440021343 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/event/__init__.py0000666000175100017510000000000013236733243023445 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/event/test_converter.py0000666000175100017510000010042613236733243024771 0ustar zuulzuul00000000000000# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
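# NOTE: Illustrative reference only, not used by the tests below. The
# converter under test turns notification payloads into events by applying
# declarative trait definitions. The dict-based definition built in
# TestNotificationConverter below corresponds roughly to a YAML entry of the
# following shape; the on-disk YAML layout shown here is an assumption given
# purely for orientation, the field names are taken from the tests:
#
#   - event_type: compute.instance.create.*
#     traits:
#       instance_id:
#         type: text
#         fields: ['payload.instance_uuid', 'payload.instance_id']
#       host:
#         type: text
#         fields: payload.host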
import datetime import jsonpath_rw_ext import mock import six from ceilometer import declarative from ceilometer.event import converter from ceilometer.event import models from ceilometer import service as ceilometer_service from ceilometer.tests import base class ConverterBase(base.BaseTestCase): @staticmethod def _create_test_notification(event_type, message_id, **kw): return dict(event_type=event_type, metadata=dict(message_id=message_id, timestamp="2013-08-08 21:06:37.803826"), publisher_id="compute.host-1-2-3", payload=kw, ) def assertIsValidEvent(self, event, notification): self.assertIsNot( None, event, "Notification dropped unexpectedly:" " %s" % str(notification)) self.assertIsInstance(event, models.Event) def assertIsNotValidEvent(self, event, notification): self.assertIs( None, event, "Notification NOT dropped when expected to be dropped:" " %s" % str(notification)) def assertHasTrait(self, event, name, value=None, dtype=None): traits = [trait for trait in event.traits if trait.name == name] self.assertGreater(len(traits), 0, "Trait %s not found in event %s" % (name, event)) trait = traits[0] if value is not None: self.assertEqual(value, trait.value) if dtype is not None: self.assertEqual(dtype, trait.dtype) if dtype == models.Trait.INT_TYPE: self.assertIsInstance(trait.value, int) elif dtype == models.Trait.FLOAT_TYPE: self.assertIsInstance(trait.value, float) elif dtype == models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) elif dtype == models.Trait.TEXT_TYPE: self.assertIsInstance(trait.value, six.string_types) def assertDoesNotHaveTrait(self, event, name): traits = [trait for trait in event.traits if trait.name == name] self.assertEqual( len(traits), 0, "Extra Trait %s found in event %s" % (name, event)) def assertHasDefaultTraits(self, event): text = models.Trait.TEXT_TYPE self.assertHasTrait(event, 'service', dtype=text) def _cmp_tree(self, this, other): if hasattr(this, 'right') and hasattr(other, 'right'): return (self._cmp_tree(this.right, other.right) and self._cmp_tree(this.left, other.left)) if not hasattr(this, 'right') and not hasattr(other, 'right'): return this == other return False def assertPathsEqual(self, path1, path2): self.assertTrue(self._cmp_tree(path1, path2), 'JSONPaths not equivalent %s %s' % (path1, path2)) class TestTraitDefinition(ConverterBase): def setUp(self): super(TestTraitDefinition, self).setUp() self.n1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_uuid="uuid-for-instance-0001", instance_id="id-for-instance-0001", instance_uuid2=None, instance_id2=None, host='host-1-2-3', bogus_date='', image_meta=dict( disk_gb='20', thing='whatzit'), foobar=50) self.ext1 = mock.MagicMock(name='mock_test_plugin') self.test_plugin_class = self.ext1.plugin self.test_plugin = self.test_plugin_class() self.test_plugin.trait_values.return_value = ['foobar'] self.ext1.reset_mock() self.ext2 = mock.MagicMock(name='mock_nothing_plugin') self.nothing_plugin_class = self.ext2.plugin self.nothing_plugin = self.nothing_plugin_class() self.nothing_plugin.trait_values.return_value = [None] self.ext2.reset_mock() self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) def test_to_trait_with_plugin(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) 
self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_null_match_with_plugin(self): cfg = dict(type='text', fields=['payload.nothere', 'payload.bogus'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([]) def test_to_trait_with_plugin_null(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='nothing')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) self.nothing_plugin_class.assert_called_once_with() self.nothing_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_with_plugin_with_parameters(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test', parameters=dict(a=1, b='foo'))) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with(a=1, b='foo') self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait(self): cfg = dict(type='text', fields='payload.instance_id') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='int', fields='payload.image_meta.disk_gb') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.INT_TYPE, t.dtype) self.assertEqual(20, t.value) def test_to_trait_multiple(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='text', fields=['payload.instance_uuid', 'payload.instance_id']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_multiple_different_nesting(self): cfg = dict(type='int', fields=['payload.foobar', 'payload.image_meta.disk_gb']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual(50, t.value) cfg = dict(type='int', 
                   fields=['payload.image_meta.disk_gb', 'payload.foobar'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual(20, t.value)

    def test_to_trait_some_null_multiple(self):
        cfg = dict(type='text',
                   fields=['payload.instance_id2', 'payload.instance_uuid'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual('uuid-for-instance-0001', t.value)

    def test_to_trait_some_missing_multiple(self):
        cfg = dict(type='text',
                   fields=['payload.not_here_boss', 'payload.instance_uuid'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsInstance(t, models.Trait)
        self.assertEqual('uuid-for-instance-0001', t.value)

    def test_to_trait_missing(self):
        cfg = dict(type='text', fields='payload.not_here_boss')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsNone(t)

    def test_to_trait_null(self):
        cfg = dict(type='text', fields='payload.instance_id2')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsNone(t)

    def test_to_trait_empty_nontext(self):
        cfg = dict(type='datetime', fields='payload.bogus_date')
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsNone(t)

    def test_to_trait_multiple_null_missing(self):
        cfg = dict(type='text',
                   fields=['payload.not_here_boss', 'payload.instance_id2'])
        tdef = converter.TraitDefinition('test_trait', cfg,
                                         self.fake_plugin_mgr)
        t = tdef.to_trait(self.n1)
        self.assertIsNone(t)

    def test_missing_fields_config(self):
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'bogus_trait', dict(),
                          self.fake_plugin_mgr)

    def test_string_fields_config(self):
        cfg = dict(fields='payload.test')
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertPathsEqual(t.getter.__self__,
                              jsonpath_rw_ext.parse('payload.test'))

    def test_list_fields_config(self):
        cfg = dict(fields=['payload.test', 'payload.other'])
        t = converter.TraitDefinition('test_trait', cfg,
                                      self.fake_plugin_mgr)
        self.assertPathsEqual(
            t.getter.__self__,
            jsonpath_rw_ext.parse('(payload.test)|(payload.other)'))

    def test_invalid_path_config(self):
        # test invalid jsonpath...
        cfg = dict(fields='payload.bogus(')
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'bogus_trait', cfg,
                          self.fake_plugin_mgr)

    def test_invalid_plugin_config(self):
        # test invalid plugin config...
        cfg = dict(fields='payload.test', plugin=dict(bogus="true"))
        self.assertRaises(declarative.DefinitionException,
                          converter.TraitDefinition,
                          'test_trait', cfg,
                          self.fake_plugin_mgr)

    def test_unknown_plugin(self):
        # test unknown plugin name...
cfg = dict(fields='payload.test', plugin=dict(name='bogus')) self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'test_trait', cfg, self.fake_plugin_mgr) def test_type_config(self): cfg = dict(type='text', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) cfg = dict(type='int', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.INT_TYPE, t.trait_type) cfg = dict(type='float', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) cfg = dict(type='datetime', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) def test_invalid_type_config(self): # test invalid jsonpath... cfg = dict(type='bogus', fields='payload.test') self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', cfg, self.fake_plugin_mgr) class TestEventDefinition(ConverterBase): def setUp(self): super(TestEventDefinition, self).setUp() self.traits_cfg = { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, } self.test_notification1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "test.thing", "uuid-for-notif-0002", instance_id="uuid-for-instance-0002") self.test_notification3 = self._create_test_notification( "test.thing", "uuid-for-notif-0003", instance_id="uuid-for-instance-0003", host=None) self.fake_plugin_mgr = {} def test_to_event(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = edef.to_event('INFO', self.test_notification1) self.assertEqual('test.thing', e.event_type) self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), e.generated) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0001', dtype=dtype) def test_to_event_missing_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = edef.to_event('INFO', self.test_notification2) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0002', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_to_event_null_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = edef.to_event('INFO', self.test_notification3) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0003', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_bogus_cfg_no_traits(self): bogus = dict(event_type='test.foo') self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr, []) def test_bogus_cfg_no_type(self): bogus = dict(traits=self.traits_cfg) self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr, 
[]) def test_included_type_string(self): cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual('test.thing', edef._included_types[0]) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertFalse(edef.match_type('random.thing')) def test_included_type_list(self): cfg = dict(event_type=['test.thing', 'other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(2, len(edef._included_types)) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertTrue(edef.included_type('other.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertTrue(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.thing')) def test_excluded_type_string(self): cfg = dict(event_type='!test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual('*', edef._included_types[0]) self.assertEqual('test.thing', edef._excluded_types[0]) self.assertEqual(1, len(edef._excluded_types)) self.assertEqual('test.thing', edef._excluded_types[0]) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.included_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertTrue(edef.match_type('random.thing')) def test_excluded_type_list(self): cfg = dict(event_type=['!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertTrue(edef.match_type('random.thing')) def test_mixed_type_list(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.whatzit')) self.assertTrue(edef.match_type('random.thing')) def test_catchall(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['*', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, 
self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['*'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertTrue(edef.is_catchall) cfg = dict(event_type=['*', 'foo'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertTrue(edef.is_catchall) def test_default_traits(self): cfg = dict(event_type='test.thing', traits={}) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), len(edef.traits)) def test_traits(self): cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertIn('host', traits) self.assertIn('instance_id', traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, len(edef.traits)) class TestNotificationConverter(ConverterBase): def setUp(self): super(TestNotificationConverter, self).setUp() self.CONF = ceilometer_service.prepare_service([], []) self.valid_event_def1 = [{ 'event_type': 'compute.instance.create.*', 'traits': { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, }, }] self.test_notification1 = self._create_test_notification( "compute.instance.create.start", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "bogus.notification.from.mars", "uuid-for-notif-0002", weird='true', host='cydonia') self.fake_plugin_mgr = {} @mock.patch('oslo_utils.timeutils.utcnow') def test_converter_missing_keys(self, mock_utcnow): self.CONF.set_override('drop_unmatched_notifications', False, group='event') # test a malformed notification now = datetime.datetime.utcnow() mock_utcnow.return_value = now c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) message = {'event_type': "foo", 'metadata': {'message_id': "abc", 'timestamp': str(now)}, 'publisher_id': "1"} e = c.to_event('INFO', message) self.assertIsValidEvent(e, message) self.assertEqual(1, len(e.traits)) self.assertEqual("foo", e.event_type) self.assertEqual(now, e.generated) def test_converter_with_catchall(self): self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.NotificationEventsConverter( self.CONF, self.valid_event_def1, self.fake_plugin_mgr) self.assertEqual(2, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event('INFO', self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) self.assertDoesNotHaveTrait(e, 'instance_id') self.assertDoesNotHaveTrait(e, 'host') def test_converter_without_catchall(self): self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.NotificationEventsConverter( self.CONF, self.valid_event_def1, self.fake_plugin_mgr) self.assertEqual(1, 
len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event('INFO', self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) def test_converter_empty_cfg_with_catchall(self): self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertEqual(1, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) e = c.to_event('INFO', self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) def test_converter_empty_cfg_without_catchall(self): self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertEqual(0, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsNotValidEvent(e, self.test_notification1) e = c.to_event('INFO', self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) @staticmethod def _convert_message(convert, level): message = {'priority': level, 'event_type': "foo", 'publisher_id': "1", 'metadata': {'message_id': "abc", 'timestamp': "2013-08-08 21:06:37.803826"}} return convert.to_event(level, message) def test_store_raw_all(self): self.CONF.set_override('store_raw', ['info', 'error'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_info_only(self): self.CONF.set_override('store_raw', ['info'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_error_only(self): self.CONF.set_override('store_raw', ['error'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_skip_all(self): c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_info_only_no_case(self): self.CONF.set_override('store_raw', ['INFO'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_skip_all(self): self.CONF.set_override('store_raw', ['unknown'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_and_good(self): self.CONF.set_override('store_raw', ['info', 'unknown'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 
'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) @mock.patch('ceilometer.declarative.LOG') def test_setup_events_load_config_in_code_tree(self, mocked_log): self.CONF.set_override('definitions_cfg_file', '/not/existing/file', group='event') self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.setup_events(self.CONF, self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) log_called_args = mocked_log.debug.call_args_list self.assertEqual( 'No Definitions configuration file found! Using default config.', log_called_args[0][0][0]) self.assertTrue(log_called_args[1][0][0].startswith( 'Loading definitions configuration file:')) ceilometer-10.0.0/ceilometer/tests/unit/event/test_endpoint.py0000666000175100017510000001637013236733243024606 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Ceilometer notify daemon.""" import fixtures import mock import oslo_messaging from oslo_utils import fileutils import six import yaml from ceilometer.pipeline import event as event_pipe from ceilometer import publisher from ceilometer.publisher import test from ceilometer import service from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { u'created_at': u'2012-05-08 20:23:41', u'deleted_at': u'', u'disk_gb': 0, u'display_name': u'testme', u'fixed_ips': [{u'address': u'10.0.0.2', u'floating_ips': [], u'meta': {}, u'type': u'fixed', u'version': 4}], u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', u'instance_type': u'm1.tiny', u'instance_type_id': 2, u'launched_at': u'2012-05-08 20:23:47.985999', u'memory_mb': 512, u'state': u'active', u'state_description': u'', u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', u'vcpus': 1, u'root_gb': 0, u'ephemeral_gb': 0, u'host': u'compute-host-name', u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', u'os_type': u'linux?', u'architecture': u'x86', u'image_ref': u'UUID', u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', } class TestEventEndpoint(tests_base.BaseTestCase): @staticmethod def get_publisher(conf, url, namespace=''): fake_drivers = {'test://': test.TestPublisher, 'except://': test.TestPublisher} return fake_drivers[url](conf, url) def 
_setup_pipeline(self, publishers): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': ['test.test'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': publishers }] }) if six.PY3: ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") self.CONF.set_override('event_pipeline_cfg_file', ev_pipeline_cfg_file) ev_pipeline_mgr = event_pipe.EventPipelineManager(self.CONF) return ev_pipeline_mgr def _setup_endpoint(self, publishers): ev_pipeline_mgr = self._setup_pipeline(publishers) self.endpoint = event_pipe.EventEndpoint( ev_pipeline_mgr.conf, ev_pipeline_mgr.publisher()) self.endpoint.event_converter = mock.MagicMock() self.endpoint.event_converter.to_event.return_value = mock.MagicMock( event_type='test.test') def setUp(self): super(TestEventEndpoint, self).setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self.fake_publisher = mock.Mock() self.useFixture(fixtures.MockPatch( 'ceilometer.publisher.test.TestPublisher', return_value=self.fake_publisher)) def test_message_to_event(self): self._setup_endpoint(['test://']) self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) def test_bad_event_non_ack_and_requeue(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) def test_message_to_event_bad_event(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.event.LOG") as mock_logger: ret = self.endpoint.process_notifications('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) exception_mock = mock_logger.error self.assertIn('Exit after error from publisher', exception_mock.call_args_list[0][0][0]) def test_message_to_event_bad_event_multi_publish(self): self._setup_endpoint(['test://', 'except://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.event.LOG") as mock_logger: ret = self.endpoint.process_notifications('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) exception_mock = mock_logger.error self.assertIn('Continue after error from publisher', exception_mock.call_args_list[0][0][0]) ceilometer-10.0.0/ceilometer/tests/unit/event/test_trait_plugins.py0000666000175100017510000001035013236733243025642 0ustar zuulzuul00000000000000# # Copyright 2013 Rackspace Hosting. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from ceilometer.event import trait_plugins class TestSplitterPlugin(base.BaseTestCase): def setUp(self): super(TestSplitterPlugin, self).setUp() self.pclass = trait_plugins.SplitterTraitPlugin def test_split(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test', value) param = dict(separator='-', segment=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar', value) param = dict(separator='-', segment=1, max_split=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar-baz', value) def test_no_sep(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test.foobar.baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test.foobar.baz', value) def test_no_segment(self): param = dict(separator='-', segment=5) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertIsNone(value) def test_no_match(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [] value = plugin.trait_values(match_list) self.assertEqual([], value) class TestBitfieldPlugin(base.BaseTestCase): def setUp(self): super(TestBitfieldPlugin, self).setUp() self.pclass = trait_plugins.BitfieldTraitPlugin self.init = 0 self.params = dict(initial_bitfield=self.init, flags=[dict(path='payload.foo', bit=0, value=42), dict(path='payload.foo', bit=1, value=12), dict(path='payload.thud', bit=1, value=23), dict(path='thingy.boink', bit=4), dict(path='thingy.quux', bit=6, value="wokka"), dict(path='payload.bar', bit=10, value='test')]) def test_bitfield(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) def test_initial(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] self.params['initial_bitfield'] = 0x2000 plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x2412, value[0]) def test_no_match(self): match_list = [] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(self.init, value[0]) def test_multi(self): match_list = [('payload.foo', 12), ('payload.thud', 23), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) ceilometer-10.0.0/ceilometer/tests/unit/test_messaging.py0000666000175100017510000000520613236733243023616 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging.conffixture from oslotest import base from ceilometer import messaging from ceilometer import service class MessagingTests(base.BaseTestCase): def setUp(self): super(MessagingTests, self).setUp() self.CONF = service.prepare_service([], []) self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) def test_get_transport_invalid_url(self): self.assertRaises(oslo_messaging.InvalidTransportURL, messaging.get_transport, self.CONF, "notvalid!") def test_get_transport_url_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://') self.assertEqual(t1, t2) def test_get_transport_default_url_caching(self): t1 = messaging.get_transport(self.CONF) t2 = messaging.get_transport(self.CONF) self.assertEqual(t1, t2) def test_get_transport_default_url_no_caching(self): t1 = messaging.get_transport(self.CONF, cache=False) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_no_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://', cache=False) t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_default_url_caching_mix(self): t1 = messaging.get_transport(self.CONF) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_optional(self): self.CONF.set_override('transport_url', 'non-url') self.assertIsNone(messaging.get_transport(self.CONF, optional=True, cache=False)) ceilometer-10.0.0/ceilometer/tests/unit/test_declarative.py0000666000175100017510000000313713236733243024125 0ustar zuulzuul00000000000000# # Copyright 2016 Mirantis, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
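# These tests check that declarative.Definition caches compiled JSONPath
# expressions: feeding the same field configurations twice should invoke the
# underlying jsonpath parser only once per distinct expression, and a list of
# fields is combined into a single '(a)|(b)' style path (see
# test_caching_parsers below).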
import fixtures import mock from ceilometer import declarative from ceilometer.tests import base class TestDefinition(base.BaseTestCase): def setUp(self): super(TestDefinition, self).setUp() self.configs = [ "_field1", "_field2|_field3", {'fields': 'field4.`split(., 1, 1)`'}, {'fields': ['field5.arg', 'field6'], 'type': 'text'} ] self.parser = mock.MagicMock() parser_patch = fixtures.MockPatch( "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse", new=self.parser) self.useFixture(parser_patch) def test_caching_parsers(self): for config in self.configs * 2: declarative.Definition("test", config, mock.MagicMock()) self.assertEqual(4, self.parser.call_count) self.parser.assert_has_calls([ mock.call("_field1"), mock.call("_field2|_field3"), mock.call("field4.`split(., 1, 1)`"), mock.call("(field5.arg)|(field6)"), ]) ceilometer-10.0.0/ceilometer/tests/unit/test_event_pipeline.py0000666000175100017510000003675413236733243024663 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import traceback import uuid import fixtures import mock import oslo_messaging from ceilometer.event import models from ceilometer.pipeline import base as pipeline from ceilometer.pipeline import event from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer.publisher import utils from ceilometer import service from ceilometer.tests import base class EventPipelineTestCase(base.BaseTestCase): def get_publisher(self, conf, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](conf, url) class PublisherClassException(publisher.ConfigPublisherBase): def publish_samples(self, samples): pass def publish_events(self, events): raise Exception() def setUp(self): super(EventPipelineTestCase, self).setUp() self.CONF = service.prepare_service([], []) self.test_event = models.Event( message_id=uuid.uuid4(), event_type='a', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'started'} ) self.test_event2 = models.Event( message_id=uuid.uuid4(), event_type='b', generated=datetime.datetime.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'stopped'} ) self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(fixtures.MockPatch( 'ceilometer.pipeline.base.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, *args, **kwargs): if self._reraise_exception: raise Exception(traceback.format_exc()) def _setup_pipeline_cfg(self): """Setup the 
appropriate form of pipeline config.""" source = {'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new://'], }) def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def _build_and_set_new_pipeline(self): name = self.cfg2file(self.pipeline_cfg) self.CONF.set_override('event_pipeline_cfg_file', name) def _exception_create_pipelinemanager(self): self._build_and_set_new_pipeline() self.assertRaises(pipeline.PipelineException, event.EventPipelineManager, self.CONF) def test_no_events(self): self._unset_pipeline_cfg('events') self._exception_create_pipelinemanager() def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_name(self): self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) for pipe in pipeline_manager.pipelines: self.assertTrue(pipe.name.startswith('event:')) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_events_include_exclude_same(self): event_cfg = ['a', '!a'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_include_exclude(self): event_cfg = ['a', '!b'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_wildcard_included(self): event_cfg = ['a', '*'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_multiple_included_events(self): event_cfg = ['a', 'b'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) with pipeline_manager.publisher() as p: p([self.test_event2]) self.assertEqual(2, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) self.assertEqual('b', 
getattr(publisher.events[1], 'event_type')) def test_event_non_match(self): event_cfg = ['nomatch'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.events)) self.assertEqual(0, publisher.calls) def test_wildcard_event(self): event_cfg = ['*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_wildcard_excluded_events(self): event_cfg = ['*', '!a'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('a')) def test_wildcard_excluded_events_not_excluded(self): event_cfg = ['*', '!b'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_not_excluded(self): event_cfg = ['!b', '!c'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_excluded(self): event_cfg = ['!a', '!c'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('a')) self.assertTrue(pipe.source.support_event('b')) self.assertFalse(pipe.source.support_event('c')) def test_wildcard_and_excluded_wildcard_events(self): event_cfg = ['*', '!compute.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source. support_event('compute.instance.create.start')) self.assertTrue(pipe.source.support_event('identity.user.create')) def test_included_event_and_wildcard_events(self): event_cfg = ['compute.instance.create.start', 'identity.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertTrue(pipe.source.support_event('identity.user.create')) self.assertTrue(pipe.source. support_event('compute.instance.create.start')) self.assertFalse(pipe.source. 
support_event('compute.instance.create.stop')) def test_excluded_event_and_excluded_wildcard_events(self): event_cfg = ['!compute.instance.create.start', '!identity.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('identity.user.create')) self.assertFalse(pipe.source. support_event('compute.instance.create.start')) self.assertTrue(pipe.source. support_event('compute.instance.create.stop')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event, self.test_event2]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.events)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, len(new_publisher.events)) self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() def test_event_pipeline_endpoint_requeue_on_failure(self): self.CONF.set_override("ack_on_event_error", False, group="notification") self.CONF.set_override("telemetry_secret", "not-so-secret", group="publisher") test_data = { 'message_id': uuid.uuid4(), 'event_type': 'a', 'generated': '2013-08-08 21:06:37.803826', 'traits': [ {'name': 't_text', 'value': 1, 'dtype': 'text_trait' } ], 'raw': {'status': 'started'} } message_sign = utils.compute_signature(test_data, 'not-so-secret') test_data['message_signature'] = message_sign fake_publisher = mock.Mock() self.useFixture(fixtures.MockPatch( 'ceilometer.publisher.test.TestPublisher', return_value=fake_publisher)) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] event_pipeline_endpoint = event.InterimEventEndpoint( self.CONF, pipeline.PublishContext([pipe]), pipe.name) fake_publisher.publish_events.side_effect = Exception ret = event_pipeline_endpoint.sample([ {'ctxt': {}, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'a', 'payload': [test_data], 'metadata': {}}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) 
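# For reference, the event pipeline configuration exercised by these tests has
# the following shape (mirroring _setup_pipeline_cfg(); the YAML rendering is
# for illustration only, since the tests build the equivalent dict and write
# it to a temporary file before loading it):
#
#   sources:
#       - name: test_source
#         events: ['a']
#         sinks: ['test_sink']
#   sinks:
#       - name: test_sink
#         publishers: ['test://']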
ceilometer-10.0.0/ceilometer/tests/unit/__init__.py0000666000175100017510000000000013236733243022324 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/test_sample.py0000666000175100017510000000766113236733243023131 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/sample.py""" import datetime from ceilometer import sample from ceilometer.tests import base class TestSample(base.BaseTestCase): SAMPLE = sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), resource_metadata={} ) def test_sample_string_format(self): expected = ('') self.assertEqual(expected, str(self.SAMPLE)) def test_sample_from_notifications_list(self): msg = { 'event_type': u'sample.create', 'metadata': { 'timestamp': u'2015-06-19T09:19:35.786893', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': [{u'counter_name': u'instance100'}], 'priority': 'info', 'publisher_id': u'ceilometer.api', } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) expected = {'event_type': msg['event_type'], 'host': msg['publisher_id']} self.assertEqual(expected, s.resource_metadata) def test_sample_from_notifications_dict(self): msg = { 'event_type': u'sample.create', 'metadata': { 'timestamp': u'2015-06-19T09:19:35.786893', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {u'counter_name': u'instance100'}, 'priority': 'info', 'publisher_id': u'ceilometer.api', } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) msg['payload']['event_type'] = msg['event_type'] msg['payload']['host'] = msg['publisher_id'] self.assertEqual(msg['payload'], s.resource_metadata) def test_sample_from_notifications_assume_utc(self): msg = { 'event_type': u'sample.create', 'metadata': { 'timestamp': u'2015-06-19T09:19:35.786893', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {u'counter_name': u'instance100'}, 'priority': 'info', 'publisher_id': u'ceilometer.api', } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) self.assertEqual('2015-06-19T09:19:35.786893+00:00', s.timestamp) def test_sample_from_notifications_keep_tz(self): msg = { 'event_type': u'sample.create', 'metadata': { 'timestamp': u'2015-06-19T09:19:35.786893+01:00', 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {u'counter_name': u'instance100'}, 'priority': 'info', 'publisher_id': u'ceilometer.api', } s = sample.Sample.from_notification( 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) self.assertEqual('2015-06-19T09:19:35.786893+01:00', s.timestamp) ceilometer-10.0.0/ceilometer/tests/unit/network/0000775000175100017510000000000013236733440021713 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/services/0000775000175100017510000000000013236733440023536 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/services/test_lbaas_v2.py0000666000175100017510000003175213236733243026653 0ustar zuulzuul00000000000000# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestLBPollster(base.BaseTestCase): def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBListenerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBListenerPollster, self).setUp() self.pollster = lbaas.LBListenerPollster(self.CONF) self.pollster.lb_version = 'v2' fake_listeners = self.fake_list_listeners() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'list_listener', return_value=fake_listeners)) @staticmethod def fake_list_listeners(): return [{'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'a9729389-6147-41a3-ab22-a24aed8692b2'}], 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'mylistener_online', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ONLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a'}], 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylistener_offline', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'OFFLINE'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_error', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'ERROR'}, {'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'loadbalancers': [ {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'mylistener_pending_create', 'admin_state_up': True, 'connection_limit': 100, 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'protocol_port': 80, 'operating_status': 'PENDING_CREATE'} ] def test_listener_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_listeners()[0][field], samples[0].resource_metadata[field]) def test_listener_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_listener_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_listeners())) self.assertEqual(set(['network.services.lb.listener']), set([s.name for s in samples])) def test_listener_discovery(self): discovered_listeners = discovery.LBListenersDiscovery( self.CONF).discover(self.manager) self.assertEqual(4, len(discovered_listeners)) for listener in self.fake_list_listeners(): if listener['operating_status'] == 'pending_create': self.assertNotIn(listener, discovered_listeners) else: self.assertIn(listener, discovered_listeners) class TestLBLoadBalancerPollster(_BaseTestLBPollster): def setUp(self): super(TestLBLoadBalancerPollster, self).setUp() self.pollster = lbaas.LBLoadBalancerPollster(self.CONF) self.pollster.lb_version = 'v2' fake_loadbalancers = self.fake_list_loadbalancers() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'list_loadbalancer', return_value=fake_loadbalancers)) @staticmethod def fake_list_loadbalancers(): return [{'operating_status': 'ONLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'ACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'vip_address': '10.0.0.2', 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_online'}, {'operating_status': 'OFFLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a'}], 'vip_address': '10.0.0.3', 'vip_subnet_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'name': 'loadbalancer_offline'}, {'operating_status': 'ERROR', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d8b'}], 'vip_address': '10.0.0.4', 'vip_subnet_id': '213d3059-87a4-45a5-91e9-d721068df0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_error'}, {'operating_status': 'PENDING_CREATE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'INACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d4ed7c'}], 'vip_address': '10.0.0.5', 'vip_subnet_id': '123d3059-87a4-45a5-91e9-d721068ae0c3', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395763b2', 'name': 'loadbalancer_pending_create'} ] def test_loadbalancer_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_list_loadbalancers()[0][field], samples[0].resource_metadata[field]) def test_loadbalancer_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(4, samples[2].volume) def test_list_loadbalancer_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_list_loadbalancers())) self.assertEqual(set(['network.services.lb.loadbalancer']), set([s.name for s in samples])) def test_loadbalancer_discovery(self): discovered_loadbalancers = discovery.LBLoadBalancersDiscovery( self.CONF).discover(self.manager) self.assertEqual(4, len(discovered_loadbalancers)) for loadbalancer in self.fake_list_loadbalancers(): if loadbalancer['operating_status'] == 'pending_create': self.assertNotIn(loadbalancer, discovered_loadbalancers) else: self.assertIn(loadbalancer, discovered_loadbalancers) class TestLBStatsPollster(_BaseTestLBPollster): def setUp(self): super(TestLBStatsPollster, self).setUp() fake_balancer_stats = self.fake_balancer_stats() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'get_loadbalancer_stats', return_value=fake_balancer_stats)) fake_loadbalancers = self.fake_list_loadbalancers() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'list_loadbalancer', return_value=fake_loadbalancers)) self.CONF.set_override('neutron_lbaas_version', 'v2', group='service_types') @staticmethod def fake_list_loadbalancers(): return [{'operating_status': 'ONLINE', 'description': '', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'provisioning_status': 'ACTIVE', 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], 'vip_address': '10.0.0.2', 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'loadbalancer_online'}, ] @staticmethod def fake_balancer_stats(): return {'active_connections': 2, 'bytes_in': 1, 'bytes_out': 3, 'total_connections': 4} def _check_get_samples(self, factory, sample_name, expected_volume, expected_type): pollster = factory(self.CONF) cache = {} samples = list(pollster.get_samples(self.manager, cache, self.fake_list_loadbalancers())) self.assertEqual(1, len(samples)) self.assertIsNotNone(samples) self.assertIn('lbstats', cache) self.assertEqual(set([sample_name]), set([s.name for s in samples])) match = [s for s in samples if s.name == sample_name] self.assertEqual(1, len(match), 'missing counter %s' % sample_name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(expected_type, match[0].type) def test_lb_total_connections(self): self._check_get_samples(lbaas.LBTotalConnectionsPollster, 'network.services.lb.total.connections', 4, 'cumulative') def test_lb_active_connections(self): self._check_get_samples(lbaas.LBActiveConnectionsPollster, 'network.services.lb.active.connections', 2, 'gauge') def test_lb_incoming_bytes(self): self._check_get_samples(lbaas.LBBytesInPollster, 'network.services.lb.incoming.bytes', 1, 'gauge') def test_lb_outgoing_bytes(self): self._check_get_samples(lbaas.LBBytesOutPollster, 'network.services.lb.outgoing.bytes', 3, 'gauge') ceilometer-10.0.0/ceilometer/tests/unit/network/services/__init__.py0000666000175100017510000000000013236733243025640 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/services/test_vpnaas.py0000666000175100017510000001650513236733243026451 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import vpnaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestVPNPollster(base.BaseTestCase): def setUp(self): super(_BaseTestVPNPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. 
return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestVPNServicesPollster(_BaseTestVPNPollster): def setUp(self): super(TestVPNServicesPollster, self).setUp() self.pollster = vpnaas.VPNServicesPollster(self.CONF) fake_vpn = self.fake_get_vpn_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'vpn_get_all', return_value=fake_vpn)) @staticmethod def fake_get_vpn_service(): return [{'status': 'ACTIVE', 'name': 'myvpn', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'INACTIVE', 'name': 'myvpn', 'description': '', 'admin_state_up': True, 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'PENDING_CREATE', 'name': 'myvpn', 'description': '', 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'error', 'name': 'myvpn', 'description': '', 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': False, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, ] def test_vpn_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_vpn_service()[0][field], samples[0].resource_metadata[field]) def test_vpn_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vpn_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vpn_service())) self.assertEqual(set(['network.services.vpn']), set([s.name for s in samples])) def test_vpn_discovery(self): discovered_vpns = discovery.VPNServicesDiscovery( self.CONF).discover(self.manager) self.assertEqual(3, len(discovered_vpns)) for vpn in self.fake_get_vpn_service(): if vpn['status'] == 'error': self.assertNotIn(vpn, discovered_vpns) else: self.assertIn(vpn, discovered_vpns) class TestIPSecConnectionsPollster(_BaseTestVPNPollster): def setUp(self): super(TestIPSecConnectionsPollster, self).setUp() self.pollster = vpnaas.IPSecConnectionsPollster(self.CONF) fake_conns = self.fake_get_ipsec_connections() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'ipsec_site_connections_get_all', return_value=fake_conns)) @staticmethod def fake_get_ipsec_connections(): return [{'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', 'admin_state_up': True, 'status': 'ACTIVE', 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_conns_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_ipsec_connections()[0][field], samples[0].resource_metadata[field]) def test_get_conns_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual(set(['network.services.vpn.connections']), set([s.name for s in samples])) def test_conns_discovery(self): discovered_conns = discovery.IPSecConnectionsDiscovery( self.CONF).discover(self.manager) self.assertEqual(1, len(discovered_conns)) self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) ceilometer-10.0.0/ceilometer/tests/unit/network/services/test_fwaas.py0000666000175100017510000001576413236733243026270 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import fwaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestFWPollster(base.BaseTestCase): def setUp(self): super(_BaseTestFWPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestFirewallPollster(_BaseTestFWPollster): def setUp(self): super(TestFirewallPollster, self).setUp() self.pollster = fwaas.FirewallPollster(self.CONF) fake_fw = self.fake_get_fw_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'firewall_get_all', return_value=fake_fw)) @staticmethod def fake_get_fw_service(): return [{'status': 'ACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'INACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'PENDING_CREATE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'error', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, ] def test_fw_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_service()[0][field], samples[0].resource_metadata[field]) def test_vpn_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vpn_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(set(['network.services.firewall']), set([s.name for s in samples])) def test_vpn_discovery(self): discovered_fws = discovery.FirewallDiscovery( self.CONF).discover(self.manager) self.assertEqual(3, len(discovered_fws)) for vpn in self.fake_get_fw_service(): if vpn['status'] == 'error': self.assertNotIn(vpn, discovered_fws) else: self.assertIn(vpn, discovered_fws) class TestIPSecConnectionsPollster(_BaseTestFWPollster): def setUp(self): super(TestIPSecConnectionsPollster, self).setUp() self.pollster = fwaas.FirewallPolicyPollster(self.CONF) fake_fw_policy = self.fake_get_fw_policy() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'fw_policy_get_all', return_value=fake_fw_policy)) @staticmethod def fake_get_fw_policy(): return [{'name': 'my_fw_policy', 'description': 'fw_policy', 'admin_state_up': True, 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'firewall_rules': [{'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_ip_address': '10.24.4.2'}, {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22'}], 'shared': True, 'audited': True, 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_policy_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_policy()[0][field], samples[0].resource_metadata[field]) def test_get_policy_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual(set(['network.services.firewall.policy']), set([s.name for s in samples])) def test_fw_policy_discovery(self): discovered_policy = discovery.FirewallPolicyDiscovery( self.CONF).discover(self.manager) self.assertEqual(1, len(discovered_policy)) self.assertEqual(self.fake_get_fw_policy(), discovered_policy) ceilometer-10.0.0/ceilometer/tests/unit/network/services/test_lbaas.py0000666000175100017510000005203113236733243026235 0ustar zuulzuul00000000000000# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import lbaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestLBPollster(base.BaseTestCase): def setUp(self): super(_BaseTestLBPollster, self).setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) self.CONF.set_override('neutron_lbaas_version', 'v1', group='service_types') plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestLBPoolPollster(_BaseTestLBPollster): def setUp(self): super(TestLBPoolPollster, self).setUp() self.pollster = lbaas.LBPoolPollster(self.CONF) fake_pools = self.fake_get_pools() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'INACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb02', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'UNKNOWN', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb03', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, {'status': 'error', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb_error', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] def test_pool_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_pools()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_pool_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_pools())) self.assertEqual(set(['network.services.lb.pool']), set([s.name for s in samples])) def test_pool_discovery(self): discovered_pools = discovery.LBPoolsDiscovery( self.CONF).discover(self.manager) self.assertEqual(4, len(discovered_pools)) for pool in self.fake_get_pools(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_pools) else: self.assertIn(pool, discovered_pools) class TestLBVipPollster(_BaseTestLBPollster): def setUp(self): super(TestLBVipPollster, self).setUp() self.pollster = lbaas.LBVipPollster(self.CONF) fake_vips = 
self.fake_get_vips() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'vip_get_all', return_value=fake_vips)) @staticmethod def fake_get_vips(): return [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, {'status': 'INACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.3', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip02'}, {'status': 'PENDING_CREATE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.4', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'UNKNOWN', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip03'}, {'status': 'error', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'session_persistence': None, 'address': '10.0.0.8', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip_error'}, ] def test_vip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_vips()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_vips())) self.assertEqual(set(['network.services.lb.vip']), set([s.name for s in samples])) def test_vip_discovery(self): discovered_vips = discovery.LBVipsDiscovery( self.CONF).discover(self.manager) self.assertEqual(4, len(discovered_vips)) for pool in self.fake_get_vips(): if pool['status'] == 'error': self.assertNotIn(pool, 
discovered_vips) else: self.assertIn(pool, discovered_vips) class TestLBMemberPollster(_BaseTestLBPollster): def setUp(self): super(TestLBMemberPollster, self).setUp() self.pollster = lbaas.LBMemberPollster(self.CONF) fake_members = self.fake_get_members() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'member_get_all', return_value=fake_members)) @staticmethod def fake_get_members(): return [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'INACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.5', 'status_description': None, 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'PENDING_CREATE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'UNKNOWN', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, {'status': 'error', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.6', 'status_description': None, 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, ] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_members()[0][field], samples[0].resource_metadata[field]) def test_pool_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_members())) self.assertEqual(set(['network.services.lb.member']), set([s.name for s in samples])) def test_members_discovery(self): discovered_members = discovery.LBMembersDiscovery( self.CONF).discover(self.manager) self.assertEqual(4, len(discovered_members)) for pool in self.fake_get_members(): if pool['status'] == 'error': self.assertNotIn(pool, discovered_members) else: self.assertIn(pool, discovered_members) class TestLBHealthProbePollster(_BaseTestLBPollster): def setUp(self): super(TestLBHealthProbePollster, self).setUp() self.pollster = lbaas.LBHealthMonitorPollster(self.CONF) fake_health_monitor = self.fake_get_health_monitor() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'health_monitor_get_all', return_value=fake_health_monitor)) @staticmethod def fake_get_health_monitor(): return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }] def test_get_samples_not_empty(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_health_monitor()[0][field], samples[0].resource_metadata[field]) def test_get_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, self.fake_get_health_monitor())) self.assertEqual(set(['network.services.lb.health_monitor']), set([s.name for s in samples])) def test_probes_discovery(self): discovered_probes = discovery.LBHealthMonitorsDiscovery( self.CONF).discover(self.manager) self.assertEqual(discovered_probes, self.fake_get_health_monitor()) class TestLBStatsPollster(_BaseTestLBPollster): def setUp(self): super(TestLBStatsPollster, self).setUp() fake_pool_stats = self.fake_pool_stats() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'pool_stats', return_value=fake_pool_stats)) fake_pools = self.fake_get_pools() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'pool_get_all', return_value=fake_pools)) @staticmethod def fake_get_pools(): return [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'provider': 'haproxy', 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ] @staticmethod def fake_pool_stats(): return {'stats': {'active_connections': 2, 'bytes_in': 1, 'bytes_out': 3, 'total_connections': 4 } } def _check_get_samples(self, factory, sample_name, expected_volume, expected_type): pollster = factory(self.CONF) cache = {} samples = list(pollster.get_samples(self.manager, cache, self.fake_get_pools())) self.assertEqual(1, len(samples)) self.assertIsNotNone(samples) self.assertIn('lbstats', cache) self.assertEqual(set([sample_name]), set([s.name for s in samples])) match = [s for s in samples if s.name == sample_name] self.assertEqual(1, len(match), 'missing counter %s' % sample_name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(expected_type, match[0].type) def test_lb_total_connections(self): self._check_get_samples(lbaas.LBTotalConnectionsPollster, 'network.services.lb.total.connections', 4, 'cumulative') def test_lb_active_connections(self): self._check_get_samples(lbaas.LBActiveConnectionsPollster, 'network.services.lb.active.connections', 2, 'gauge') def test_lb_incoming_bytes(self): self._check_get_samples(lbaas.LBBytesInPollster, 'network.services.lb.incoming.bytes', 1, 'gauge') def test_lb_outgoing_bytes(self): self._check_get_samples(lbaas.LBBytesOutPollster, 'network.services.lb.outgoing.bytes', 3, 'gauge') ceilometer-10.0.0/ceilometer/tests/unit/network/test_floating_ip.py0000666000175100017510000001041313236733243025621 0ustar zuulzuul00000000000000# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from oslotest import base from ceilometer.network import floatingip from ceilometer.network.services import discovery from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestFloatingIPPollster(base.BaseTestCase): def setUp(self): super(_BaseTestFloatingIPPollster, self).setUp() self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() class TestFloatingIPPollster(_BaseTestFloatingIPPollster): def setUp(self): super(TestFloatingIPPollster, self).setUp() self.pollster = floatingip.FloatingIPPollster(self.CONF) fake_fip = self.fake_get_fip_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'fip_get_all', return_value=fake_fip)) @staticmethod def fake_get_fip_service(): return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'ACTIVE', 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.6', 'floating_ip_address': '65.79.162.11', 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'DOWN', 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.7', 'floating_ip_address': '65.79.162.12', 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', 'status': 'error', 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.8', 'floating_ip_address': '65.79.162.13', 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}] def test_fip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(3, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fip_service()[0][field], samples[0].resource_metadata[field]) def test_fip_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(1, samples[0].volume) def test_get_fip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fip_service())) self.assertEqual(set(['ip.floating']), set([s.name for s in samples])) def test_fip_discovery(self): discovered_fips = discovery.FloatingIPDiscovery( self.CONF).discover(self.manager) self.assertEqual(3, len(discovered_fips)) ceilometer-10.0.0/ceilometer/tests/unit/network/__init__.py0000666000175100017510000000000013236733243024015 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/0000775000175100017510000000000013236733440024105 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opendaylight/0000775000175100017510000000000013236733440026574 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py0000666000175100017510000020341413236733243031507 0ustar zuulzuul00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import base import six from six.moves.urllib import parse as url_parse from ceilometer.network.statistics.opendaylight import driver from ceilometer import service @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractproperty def flow_data(self): pass @abc.abstractproperty def port_data(self): pass @abc.abstractproperty def table_data(self): pass @abc.abstractproperty def topology_data(self): pass @abc.abstractproperty def switch_data(self): pass @abc.abstractproperty def user_links_data(self): pass @abc.abstractproperty def active_hosts_data(self): pass @abc.abstractproperty def inactive_hosts_data(self): pass fake_odl_url = url_parse.ParseResult('opendaylight', 'localhost:8080', 'controller/nb/v2', None, None, None) fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=default&auth=basic') fake_params_multi_container = ( url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'container_name=first&container_name=second&' 'auth=basic')) def setUp(self): super(_Base, self).setUp() self.addCleanup(mock.patch.stopall) conf = service.prepare_service([], []) self.driver = driver.OpenDayLightDriver(conf) self.get_flow_statistics = mock.patch( 'ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', return_value=self.flow_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_table_statistics', return_value=self.table_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_port_statistics', return_value=self.port_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_topology', return_value=self.topology_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'TopologyAPIClient.get_user_links', return_value=self.user_links_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'SwitchManagerAPIClient.get_nodes', return_value=self.switch_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 'HostTrackerAPIClient.get_active_hosts', return_value=self.active_hosts_data).start() mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'HostTrackerAPIClient.get_inactive_hosts', return_value=self.inactive_hosts_data).start() def _test_for_meter(self, meter_name, expected_data): sample_data = self.driver.get_sample_data(meter_name, self.fake_odl_url, self.fake_params, {}) self.assertEqual(expected_data, list(sample_data)) class TestOpenDayLightDriverSpecial(_Base): flow_data = {"flowStatistics": []} port_data = {"portStatistics": []} table_data = {"tableStatistics": []} topology_data = {"edgeProperties": []} switch_data = {"nodeProperties": []} user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_dict_to_kv(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, 'nested2': [{'c': 'A'}, {'c': 'B'}] } pairs = list(self.driver.dict_to_keyval(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B'), ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) def test_not_implemented_meter(self): sample_data = self.driver.get_sample_data('egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) sample_data = self.driver.get_sample_data('switch.table.egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) def test_cache(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(1, self.get_flow_statistics.call_count) cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(2, self.get_flow_statistics.call_count) def test_multi_container(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertEqual(2, self.get_flow_statistics.call_count) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('first', odl_data) self.assertIn('second', odl_data) def test_http_error(self): mock.patch('ceilometer.network.statistics.opendaylight.client.' 'StatisticsAPIClient.get_flow_statistics', side_effect=Exception()).start() sample_data = self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, {}) self.assertEqual(0, len(sample_data)) mock.patch('ceilometer.network.statistics.opendaylight.client.' 
'StatisticsAPIClient.get_flow_statistics', side_effect=[Exception(), self.flow_data]).start() cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params_multi_container, cache) self.assertIn('network.statistics.opendaylight', cache) odl_data = cache['network.statistics.opendaylight'] self.assertIn('second', odl_data) class TestOpenDayLightDriverSimple(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, ] } ] } topology_data = {"edgeProperties": []} switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, ] } user_links_data = {"userLinks": []} active_hosts_data = {"hostConfig": []} inactive_hosts_data = {"hostConfig": []} def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_timeStamp_connectedSince": "1377291227877" }, None), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }, None), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] 
self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.frame_error', expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", 
"flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }, None), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.bytes', expected_data) class TestOpenDayLightDriverComplex(_Base): flow_data = { "flowStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "flowStatistic": [ { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.1" } ] }, "actions": { "@type": "output", "port": { "id": "3", 
"node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "0", "durationNanoseconds": "397000000", "durationSeconds": "1828", "packetCount": "0", "tableId": "0" }, { "flow": { "match": { "matchField": [ { "type": "DL_TYPE", "value": "2048" }, { "mask": "255.255.255.255", "type": "NW_DST", "value": "1.1.1.2" } ] }, "actions": { "@type": "output", "port": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" } }, "hardTimeout": "0", "id": "0", "idleTimeout": "0", "priority": "1" }, "byteCount": "89", "durationNanoseconds": "200000", "durationSeconds": "5648", "packetCount": "30", "tableId": "1" } ] } ] } port_data = { "portStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "portStatistic": [ { "nodeConnector": { "id": "4", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "3", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12740", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "182", "transmitBytes": "12110", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "173" }, { "nodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "12180", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "174", "transmitBytes": "12670", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "181" }, { "nodeConnector": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" }, { "nodeConnector": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "collisionCount": "0", "receiveBytes": "0", "receiveCrcError": "0", "receiveDrops": "0", "receiveErrors": "0", "receiveFrameError": "0", "receiveOverRunError": "0", "receivePackets": "0", "transmitBytes": "0", "transmitDrops": "0", "transmitErrors": "0", "transmitPackets": "0" } ] } ] } table_data = { "tableStatistics": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "tableStatistic": [ { "activeCount": "11", "lookupCount": "816", "matchedCount": "220", "nodeTable": { "id": "0", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } }, { "activeCount": "20", "lookupCount": "10", "matchedCount": "5", "nodeTable": { "id": "1", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" } } } ] } ] } topology_data = { "edgeProperties": [ { "edge": { "headNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" } }, "properties": { 
"bandwidth": { "value": 10000000000 }, "config": { "value": 1 }, "name": { "value": "s2-eth3" }, "state": { "value": 1 }, "timeStamp": { "name": "creation", "value": 1379527162648 } } }, { "edge": { "headNodeConnector": { "id": "5", "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "type": "OF" }, "tailNodeConnector": { "id": "2", "node": { "id": "00:00:00:00:00:00:00:04", "type": "OF" }, "type": "OF" } }, "properties": { "timeStamp": { "name": "creation", "value": 1379527162648 } } } ] } switch_data = { "nodeProperties": [ { "node": { "id": "00:00:00:00:00:00:00:02", "type": "OF" }, "properties": { "actions": { "value": "4095" }, "buffers": { "value": "256" }, "capabilities": { "value": "199" }, "description": { "value": "None" }, "macAddress": { "value": "00:00:00:00:00:02" }, "tables": { "value": "-1" }, "timeStamp": { "name": "connectedSince", "value": "1377291227877" } } }, { "node": { "id": "00:00:00:00:00:00:00:03", "type": "OF" }, "properties": { "actions": { "value": "1024" }, "buffers": { "value": "512" }, "capabilities": { "value": "1000" }, "description": { "value": "Foo Bar" }, "macAddress": { "value": "00:00:00:00:00:03" }, "tables": { "value": "10" }, "timeStamp": { "name": "connectedSince", "value": "1377291228000" } } } ] } user_links_data = { "userLinks": [ { "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", "name": "link1", "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", "status": "Success" } ] } active_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:00:01:01", "networkAddress": "1.1.1.1", "nodeConnectorId": "9", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:00:02:02", "networkAddress": "2.2.2.2", "nodeConnectorId": "1", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "true", "vlan": "0" } ] } inactive_hosts_data = { "hostConfig": [ { "dataLayerAddress": "00:00:00:01:01:01", "networkAddress": "1.1.1.3", "nodeConnectorId": "8", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:01", "nodeType": "OF", "staticHost": "false", "vlan": "0" }, { "dataLayerAddress": "00:00:00:01:02:02", "networkAddress": "2.2.2.4", "nodeConnectorId": "0", "nodeConnectorType": "OF", "nodeId": "00:00:00:00:00:00:00:02", "nodeType": "OF", "staticHost": "false", "vlan": "1" } ] } def test_meter_switch(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "4095", "properties_buffers": "256", "properties_capabilities": "199", "properties_description": "None", "properties_macAddress": "00:00:00:00:00:02", "properties_tables": "-1", "properties_timeStamp_connectedSince": "1377291227877" }, None), (1, "00:00:00:00:00:00:00:03", { 'controller': 'OpenDaylight', 'container': 'default', "properties_actions": "1024", "properties_buffers": "512", "properties_capabilities": "1000", "properties_description": "Foo Bar", "properties_macAddress": "00:00:00:00:00:03", "properties_tables": "10", "properties_timeStamp_connectedSince": "1377291228000" }, None), ] self._test_for_meter('switch', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4', }, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3', 'user_link_node_id': '00:00:00:00:00:00:00:05', 'user_link_node_port': '5', 
'user_link_status': 'Success', 'user_link_name': 'link1', }, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2', 'topology_node_id': '00:00:00:00:00:00:00:03', 'topology_node_port': '2', "topology_bandwidth": 10000000000, "topology_config": 1, "topology_name": "s2-eth3", "topology_state": 1, "topology_timeStamp_creation": 1379527162648 }, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1', 'host_status': 'active', 'host_dataLayerAddress': '00:00:00:00:02:02', 'host_networkAddress': '2.2.2.2', 'host_staticHost': 'true', 'host_vlan': '0', }, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0', 'host_status': 'inactive', 'host_dataLayerAddress': '00:00:00:01:02:02', 'host_networkAddress': '2.2.2.4', 'host_staticHost': 'false', 'host_vlan': '1', }, None), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (182, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (174, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (173, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (181, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (12740, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (12180, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (12110, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (12670, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, 
"00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_transmit_drops(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.transmit.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_switch_port_transmit_errors(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.transmit.errors', expected_data) def test_meter_switch_port_receive_frame_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.frame_error', 
expected_data) def test_meter_switch_port_receive_overrun_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.overrun_error', expected_data) def test_meter_switch_port_receive_crc_error(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.receive.crc_error', expected_data) def test_meter_switch_port_collision_count(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '4'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '3'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '2'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '1'}, None), (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'port': '0'}, None), ] self._test_for_meter('switch.port.collision.count', expected_data) def test_meter_switch_table(self): expected_data = [ (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}, None), ] self._test_for_meter('switch.table', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (11, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), (20, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}, None), ] self._test_for_meter('switch.table.active.entries', expected_data) def test_meter_switch_table_lookup_packets(self): expected_data = [ (816, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), (10, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}, None), ] self._test_for_meter('switch.table.lookup.packets', expected_data) def test_meter_switch_table_matched_packets(self): expected_data = [ (220, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0'}, None), (5, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1'}, None), ] self._test_for_meter('switch.table.matched.packets', expected_data) def test_meter_switch_flow(self): expected_data = [ (1, 
"00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }, None), (1, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1" }, None), ] self._test_for_meter('switch.flow', expected_data) def test_meter_switch_flow_duration_seconds(self): expected_data = [ (1828, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), (5648, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.duration_seconds', expected_data) def test_meter_switch_flow_duration_nanoseconds(self): expected_data = [ (397000000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), (200000, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", 
"flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) def test_meter_switch_flow_packets(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), (30, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.packets', expected_data) def test_meter_switch_flow_bytes(self): expected_data = [ (0, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '0', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.1", "flow_actions_@type": "output", "flow_actions_port_id": "3", "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), (89, "00:00:00:00:00:00:00:02", { 'controller': 'OpenDaylight', 'container': 'default', 'table_id': '1', 'flow_id': '0', "flow_match_matchField[0]_type": "DL_TYPE", "flow_match_matchField[0]_value": "2048", "flow_match_matchField[1]_mask": "255.255.255.255", "flow_match_matchField[1]_type": "NW_DST", "flow_match_matchField[1]_value": "1.1.1.2", "flow_actions_@type": "output", "flow_actions_port_id": "4", "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", "flow_actions_port_node_type": "OF", "flow_actions_port_type": "OF", "flow_hardTimeout": "0", "flow_idleTimeout": "0", "flow_priority": "1"}, None), ] self._test_for_meter('switch.flow.bytes', expected_data) ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py0000666000175100017510000000000013236733243030676 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py0000666000175100017510000001412313236733243031467 0ustar zuulzuul00000000000000# # Copyright 2013 NEC Corporation. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture as config_fixture from oslotest import base from requests import auth as req_auth import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer.network.statistics.opendaylight import client from ceilometer import service as ceilometer_service class TestClientHTTPBasicAuth(base.BaseTestCase): auth_way = 'basic' scheme = 'http' def setUp(self): super(TestClientHTTPBasicAuth, self).setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(config_fixture.Config(conf)).conf self.parsed_url = urlparse.urlparse( 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' 'container_name=egg&auth=%s&user=admin&password=admin_pass&' 'scheme=%s' % (self.auth_way, self.scheme)) self.params = urlparse.parse_qs(self.parsed_url.query) self.endpoint = urlparse.urlunparse( urlparse.ParseResult(self.scheme, self.parsed_url.netloc, self.parsed_url.path, None, None, None)) odl_params = {'auth': self.params.get('auth')[0], 'user': self.params.get('user')[0], 'password': self.params.get('password')[0]} self.client = client.Client(self.CONF, self.endpoint, odl_params) self.resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.resp).start() self.resp.raw.version = 1.1 self.resp.status_code = 200 self.resp.reason = 'OK' self.resp.headers = {} self.resp.content = 'dummy' def _test_request(self, method, url): data = method('default') call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] # check url real_url = url % {'container_name': 'default', 'scheme': self.scheme} self.assertEqual(real_url, call_args[0]) # check auth parameters auth = call_kwargs.get('auth') if self.auth_way == 'digest': self.assertIsInstance(auth, req_auth.HTTPDigestAuth) else: self.assertIsInstance(auth, req_auth.HTTPBasicAuth) self.assertEqual('admin', auth.username) self.assertEqual('admin_pass', auth.password) # check header self.assertEqual( {'Accept': 'application/json'}, call_kwargs['headers']) # check return value self.assertEqual(self.get().json(), data) def test_flow_statistics(self): self._test_request( self.client.statistics.get_flow_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/flow') def test_port_statistics(self): self._test_request( self.client.statistics.get_port_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/port') def test_table_statistics(self): self._test_request( self.client.statistics.get_table_statistics, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/statistics/%(container_name)s/table') def test_topology(self): self._test_request( self.client.topology.get_topology, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/topology/%(container_name)s') def test_user_links(self): self._test_request( self.client.topology.get_user_links, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' 
'/topology/%(container_name)s/userLinks') def test_switch(self): self._test_request( self.client.switch_manager.get_nodes, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/switchmanager/%(container_name)s/nodes') def test_active_hosts(self): self._test_request( self.client.host_tracker.get_active_hosts, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/hosttracker/%(container_name)s/hosts/active') def test_inactive_hosts(self): self._test_request( self.client.host_tracker.get_inactive_hosts, '%(scheme)s://127.0.0.1:8080/controller/nb/v2' '/hosttracker/%(container_name)s/hosts/inactive') def test_http_error(self): self.resp.status_code = 404 self.resp.reason = 'Not Found' try: self.client.statistics.get_flow_statistics('default') self.fail('') except client.OpenDaylightRESTAPIFailed as e: self.assertEqual( _('OpenDaylight API returned %(status)s %(reason)s') % {'status': self.resp.status_code, 'reason': self.resp.reason}, six.text_type(e)) def test_other_error(self): class _Exception(Exception): pass self.get = mock.patch('requests.get', side_effect=_Exception).start() self.assertRaises(_Exception, self.client.statistics.get_flow_statistics, 'default') class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): auth_way = 'digest' class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): scheme = 'https' class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): scheme = 'https' ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_port.py0000666000175100017510000000744713236733243026521 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
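# A minimal, standalone sketch of the attribute contract these tests verify:
# every pollster class used below is expected to expose meter_name,
# meter_type and meter_unit, and _PollsterTestBase._test_pollster (defined in
# ceilometer/tests/unit/network/statistics/__init__.py) simply compares those
# attributes against the documented values.  _FakePollster and _check are
# illustrative stand-ins only; the sample.TYPE_* constants are represented
# here by plain strings.
class _FakePollster(object):
    meter_name = 'switch.port.receive.packets'
    meter_type = 'cumulative'
    meter_unit = 'packet'


def _check(pollster, name, type_, unit):
    # loosely mirrors _test_pollster(pollster_class, meter_name, ...)
    assert pollster.meter_name == name
    assert pollster.meter_type == type_
    assert pollster.meter_unit == unit


_check(_FakePollster, 'switch.port.receive.packets', 'cumulative', 'packet')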
from ceilometer.network.statistics import port from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestPortPollsters(statistics._PollsterTestBase): def test_port_pollster(self): self._test_pollster( port.PortPollster, 'switch.port', sample.TYPE_GAUGE, 'port') def test_port_pollster_uptime(self): self._test_pollster( port.PortPollsterUptime, 'switch.port.uptime', sample.TYPE_GAUGE, 's') def test_port_pollster_receive_packets(self): self._test_pollster( port.PortPollsterReceivePackets, 'switch.port.receive.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_packets(self): self._test_pollster( port.PortPollsterTransmitPackets, 'switch.port.transmit.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_bytes(self): self._test_pollster( port.PortPollsterReceiveBytes, 'switch.port.receive.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_transmit_bytes(self): self._test_pollster( port.PortPollsterTransmitBytes, 'switch.port.transmit.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_receive_drops(self): self._test_pollster( port.PortPollsterReceiveDrops, 'switch.port.receive.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_drops(self): self._test_pollster( port.PortPollsterTransmitDrops, 'switch.port.transmit.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_errors(self): self._test_pollster( port.PortPollsterReceiveErrors, 'switch.port.receive.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_errors(self): self._test_pollster( port.PortPollsterTransmitErrors, 'switch.port.transmit.errors', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_frame_errors(self): self._test_pollster( port.PortPollsterReceiveFrameErrors, 'switch.port.receive.frame_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_overrun_errors(self): self._test_pollster( port.PortPollsterReceiveOverrunErrors, 'switch.port.receive.overrun_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_crc_errors(self): self._test_pollster( port.PortPollsterReceiveCRCErrors, 'switch.port.receive.crc_error', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_collision_count(self): self._test_pollster( port.PortPollsterCollisionCount, 'switch.port.collision.count', sample.TYPE_CUMULATIVE, 'packet') ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_port_v2.py0000666000175100017510000000465013236733243027121 0ustar zuulzuul00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
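# The port_v2 pollsters exercised below mirror the switch.port.* pollsters in
# test_port.py but report their meters without the leading 'switch.'
# component (for example 'port.receive.packets' instead of
# 'switch.port.receive.packets').  The helper below is hypothetical and only
# illustrates that naming relationship.
def _to_v2_name(name):
    # 'switch.port.receive.drops' -> 'port.receive.drops'
    return name[len('switch.'):] if name.startswith('switch.') else name


assert _to_v2_name('switch.port.receive.packets') == 'port.receive.packets'
assert _to_v2_name('switch.port.transmit.bytes') == 'port.transmit.bytes'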
from ceilometer.network.statistics import port_v2 from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestPortPollsters(statistics._PollsterTestBase): def test_port_pollster(self): self._test_pollster( port_v2.PortPollster, 'port', sample.TYPE_GAUGE, 'port') def test_port_pollster_uptime(self): self._test_pollster( port_v2.PortPollsterUptime, 'port.uptime', sample.TYPE_GAUGE, 's') def test_port_pollster_receive_packets(self): self._test_pollster( port_v2.PortPollsterReceivePackets, 'port.receive.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_transmit_packets(self): self._test_pollster( port_v2.PortPollsterTransmitPackets, 'port.transmit.packets', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_bytes(self): self._test_pollster( port_v2.PortPollsterReceiveBytes, 'port.receive.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_transmit_bytes(self): self._test_pollster( port_v2.PortPollsterTransmitBytes, 'port.transmit.bytes', sample.TYPE_CUMULATIVE, 'B') def test_port_pollster_receive_drops(self): self._test_pollster( port_v2.PortPollsterReceiveDrops, 'port.receive.drops', sample.TYPE_CUMULATIVE, 'packet') def test_port_pollster_receive_errors(self): self._test_pollster( port_v2.PortPollsterReceiveErrors, 'port.receive.errors', sample.TYPE_CUMULATIVE, 'packet') ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_statistics.py0000666000175100017510000001512213236733243027714 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
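# A compact sketch of the contract exercised in this module: a statistics
# driver's get_sample_data() yields (volume, resource_id, resource_metadata,
# project_id) tuples, and the pollster turns each tuple into a sample that
# carries the pollster's own meter_name/meter_type/meter_unit, with a missing
# metadata dict normalised to {}.  FakeSample and make_samples are
# illustrative stand-ins, not ceilometer APIs; 'foo'/'cumulative'/'bar' stand
# in for the FakePollster attributes used in the tests below.
import collections

FakeSample = collections.namedtuple(
    'FakeSample',
    'name type unit volume resource_id resource_metadata project_id')


def make_samples(meter_name, meter_type, meter_unit, data_tuples):
    for volume, resource_id, metadata, project_id in data_tuples:
        yield FakeSample(meter_name, meter_type, meter_unit,
                         volume, resource_id, metadata or {}, project_id)


_samples = list(make_samples('foo', 'cumulative', 'bar',
                             [(1, 'a', {'spam': 'egg'}, None),
                              (2, 'b', None, None)]))
assert _samples[0].resource_metadata == {'spam': 'egg'}
assert _samples[1].resource_metadata == {}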
import datetime from oslo_utils import timeutils from oslo_utils import uuidutils from oslotest import base from ceilometer.network import statistics from ceilometer.network.statistics import driver from ceilometer import sample from ceilometer import service PROJECT_ID = uuidutils.generate_uuid() class TestBase(base.BaseTestCase): def setUp(self): super(TestBase, self).setUp() self.CONF = service.prepare_service([], []) def test_subclass_ok(self): class OkSubclass(statistics._Base): meter_name = 'foo' meter_type = sample.TYPE_GAUGE meter_unit = 'B' OkSubclass(self.CONF) def test_subclass_ng(self): class NgSubclass1(statistics._Base): """meter_name is lost.""" meter_type = sample.TYPE_GAUGE meter_unit = 'B' class NgSubclass2(statistics._Base): """meter_type is lost.""" meter_name = 'foo' meter_unit = 'B' class NgSubclass3(statistics._Base): """meter_unit is lost.""" meter_name = 'foo' meter_type = sample.TYPE_GAUGE self.assertRaises(TypeError, NgSubclass1, self.CONF) self.assertRaises(TypeError, NgSubclass2, self.CONF) self.assertRaises(TypeError, NgSubclass3, self.CONF) class TestBaseGetSamples(base.BaseTestCase): def setUp(self): super(TestBaseGetSamples, self).setUp() self.CONF = service.prepare_service([], []) class FakePollster(statistics._Base): meter_name = 'foo' meter_type = sample.TYPE_CUMULATIVE meter_unit = 'bar' self.pollster = FakePollster(self.CONF) def tearDown(self): statistics._Base.drivers = {} super(TestBaseGetSamples, self).tearDown() @staticmethod def _setup_ext_mgr(**drivers): statistics._Base.drivers = drivers def _make_fake_driver(self, *return_values): class FakeDriver(driver.Driver): def __init__(self, conf): super(FakeDriver, self).__init__(conf) self.index = 0 def get_sample_data(self, meter_name, parse_url, params, cache): if self.index >= len(return_values): yield None retval = return_values[self.index] self.index += 1 yield retval return FakeDriver @staticmethod def _make_timestamps(count): now = timeutils.utcnow() return [(now + datetime.timedelta(seconds=i)).isoformat() for i in range(count)] def _get_samples(self, *resources): return [v for v in self.pollster.get_samples(self, {}, resources)] def _assert_sample(self, s, volume, resource_id, resource_metadata, project_id): self.assertEqual('foo', s.name) self.assertEqual(sample.TYPE_CUMULATIVE, s.type) self.assertEqual('bar', s.unit) self.assertEqual(volume, s.volume) self.assertIsNone(s.user_id) self.assertEqual(project_id, s.project_id) self.assertEqual(resource_id, s.resource_id) self.assertEqual(resource_metadata, s.resource_metadata) def test_get_samples_one_driver_one_resource(self): fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, PROJECT_ID), (2, 'b', None, None)) self._setup_ext_mgr(http=fake_driver(self.CONF)) samples = self._get_samples('http://foo') self.assertEqual(1, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, PROJECT_ID) def test_get_samples_one_driver_two_resource(self): fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'}, None), (2, 'b', None, None), (3, 'c', None, None)) self._setup_ext_mgr(http=fake_driver(self.CONF)) samples = self._get_samples('http://foo', 'http://bar') self.assertEqual(2, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, None) self._assert_sample(samples[1], 2, 'b', {}, None) def test_get_samples_two_driver_one_resource(self): fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'}, None), (2, 'b', None, None)) fake_driver2 = self._make_fake_driver((11, 'A', None, None), (12, 'B', None, 
None)) self._setup_ext_mgr(http=fake_driver1(self.CONF), https=fake_driver2(self.CONF)) samples = self._get_samples('http://foo') self.assertEqual(1, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, None) def test_get_samples_multi_samples(self): fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'}, None), (2, 'b', None, None)]) self._setup_ext_mgr(http=fake_driver(self.CONF)) samples = self._get_samples('http://foo') self.assertEqual(2, len(samples)) self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'}, None) self._assert_sample(samples[1], 2, 'b', {}, None) def test_get_samples_return_none(self): fake_driver = self._make_fake_driver(None) self._setup_ext_mgr(http=fake_driver(self.CONF)) samples = self._get_samples('http://foo') self.assertEqual(0, len(samples)) def test_get_samples_return_no_generator(self): class NoneFakeDriver(driver.Driver): def get_sample_data(self, meter_name, parse_url, params, cache): return None self._setup_ext_mgr(http=NoneFakeDriver(self.CONF)) samples = self._get_samples('http://foo') self.assertFalse(samples) ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opencontrail/0000775000175100017510000000000013236733440026602 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py0000666000175100017510000002731113236733243031515 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
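# A small sketch of the field mapping the tests below depend on: each entry
# in the analytics if_stats_list / fip_stats_list payload carries in_* and
# out_* counters, and the expected sample volumes asserted below are exactly
# those counter values (in_pkts for receive.packets, out_pkts for
# transmit.packets, in_bytes/out_bytes for the byte meters).  The mapping
# dict and _pick() helper are illustrative only, not driver code.
_METER_FIELD = {
    'switch.port.receive.packets': 'in_pkts',
    'switch.port.transmit.packets': 'out_pkts',
    'switch.port.receive.bytes': 'in_bytes',
    'switch.port.transmit.bytes': 'out_bytes',
}


def _pick(meter_name, if_stat):
    # e.g. _pick('switch.port.receive.packets', {'in_pkts': 6, ...}) == 6
    return if_stat[_METER_FIELD[meter_name]]


_stat = {'in_pkts': 6, 'out_pkts': 5, 'in_bytes': 23, 'out_bytes': 22}
assert _pick('switch.port.receive.packets', _stat) == 6
assert _pick('switch.port.transmit.bytes', _stat) == 22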
import mock from oslotest import base from six.moves.urllib import parse as urlparse from ceilometer.network.statistics.opencontrail import driver from ceilometer import service class TestOpencontrailDriver(base.BaseTestCase): def setUp(self): super(TestOpencontrailDriver, self).setUp() self.nc_ports = mock.patch('ceilometer.neutron_client' '.Client.port_get_all', return_value=self.fake_ports()) self.nc_ports.start() self.CONF = service.prepare_service([], []) self.driver = driver.OpencontrailDriver(self.CONF) self.parse_url = urlparse.ParseResult('opencontrail', '127.0.0.1:8143', '/', None, None, None) self.params = {'password': ['admin'], 'scheme': ['http'], 'username': ['admin'], 'verify_ssl': ['false'], 'resource': ['if_stats_list']} @staticmethod def fake_ports(): return [{'admin_state_up': True, 'device_owner': 'compute:None', 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] @staticmethod def fake_port_stats(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], "fip_stats_list": [{ "in_bytes": 33, "iface_name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442"), "out_bytes": 44, "out_pkts": 10, "virtual_network": "default-domain:openstack:public", "in_pkts": 11, "ip_address": "1.1.1.1" }] }}}]} @staticmethod def fake_port_stats_with_node(): return {"value": [{ "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", "value": { "UveVirtualMachineAgent": { "if_stats_list": [ [[{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 5, "in_pkts": 6, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442") }], 'node1'], [[{ "out_bytes": 22, "in_bandwidth_usage": 0, "in_bytes": 23, "out_bandwidth_usage": 0, "out_pkts": 4, "in_pkts": 13, "name": ("default-domain:demo:" "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], 'node2'] ] }}}]} def _test_meter(self, meter_name, expected, fake_port_stats=None): if not fake_port_stats: fake_port_stats = self.fake_port_stats() with mock.patch('ceilometer.network.' 'statistics.opencontrail.' 'client.NetworksAPIClient.' 
'get_vm_statistics', return_value=fake_port_stats) as port_stats: samples = self.driver.get_sample_data(meter_name, self.parse_url, self.params, {}) self.assertEqual(expected, [s for s in samples]) port_stats.assert_called_with('*') def test_switch_port_receive_packets_with_node(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None), (13, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None)] self._test_meter('switch.port.receive.packets', expected, self.fake_port_stats_with_node()) def test_switch_port_receive_packets(self): expected = [(6, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets(self): expected = [(5, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None)] self._test_meter('switch.port.transmit.packets', expected) def test_switch_port_receive_bytes(self): expected = [(23, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes(self): expected = [(22, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'if_stats_list'}, None)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_receive_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(11, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, None)] self._test_meter('switch.port.receive.packets', expected) def test_switch_port_transmit_packets_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(10, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, None)] self._test_meter('switch.port.transmit.packets', expected) def 
test_switch_port_receive_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(33, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, None)] self._test_meter('switch.port.receive.bytes', expected) def test_switch_port_transmit_bytes_fip(self): self.params['resource'] = ['fip_stats_list'] expected = [(44, '96d49cc3-4e01-40ce-9cac-c0e32642a442', {'device_owner_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'domain': 'default-domain', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'project': 'demo', 'project_id': '89271fa581ab4380bf172f868c3615f9', 'resource': 'fip_stats_list'}, None)] self._test_meter('switch.port.transmit.bytes', expected) def test_switch_port_transmit_bytes_non_existing_network(self): self.params['virtual_network'] = ['aaa'] self.params['resource'] = ['fip_stats_list'] self._test_meter('switch.port.transmit.bytes', []) ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py0000666000175100017510000000000013236733243030704 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py0000666000175100017510000000527213236733243031502 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
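# The tests below pin down the analytics endpoint layout and the fact that
# the extra parameters travel in the 'data' keyword of requests.get.  A
# minimal sketch of the URL construction being asserted
# (build_vm_statistics_url is an illustrative helper, not the real client
# API):
def build_vm_statistics_url(endpoint, vm_id):
    # 'http://127.0.0.1:8081' + '/analytics/uves/virtual-machine/bbb'
    return '%s/analytics/uves/virtual-machine/%s' % (endpoint.rstrip('/'),
                                                     vm_id)


assert (build_vm_statistics_url('http://127.0.0.1:8081', 'bbb') ==
        'http://127.0.0.1:8081/analytics/uves/virtual-machine/bbb')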
import mock from oslo_config import fixture as config_fixture from oslotest import base from ceilometer.network.statistics.opencontrail import client from ceilometer import service as ceilometer_service class TestOpencontrailClient(base.BaseTestCase): def setUp(self): super(TestOpencontrailClient, self).setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(config_fixture.Config(conf)).conf self.client = client.Client(self.CONF, 'http://127.0.0.1:8081', {'arg1': 'aaa'}) self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() self.get_resp.raw.version = 1.1 self.get_resp.status_code = 200 self.get_resp.reason = 'OK' self.get_resp.content = '' def test_vm_statistics(self): self.client.networks.get_vm_statistics('bbb') call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] expected_url = ('http://127.0.0.1:8081/analytics/' 'uves/virtual-machine/bbb') self.assertEqual(expected_url, call_args[0]) data = call_kwargs.get('data') expected_data = {'arg1': 'aaa'} self.assertEqual(expected_data, data) def test_vm_statistics_params(self): self.client.networks.get_vm_statistics('bbb', {'resource': 'fip_stats_list', 'virtual_network': 'ccc'}) call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] expected_url = ('http://127.0.0.1:8081/analytics/' 'uves/virtual-machine/bbb') self.assertEqual(expected_url, call_args[0]) data = call_kwargs.get('data') expected_data = {'arg1': 'aaa', 'resource': 'fip_stats_list', 'virtual_network': 'ccc'} self.assertEqual(expected_data, data) ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_table.py0000666000175100017510000000315113236733243026610 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import table from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestTablePollsters(statistics._PollsterTestBase): def test_table_pollster(self): self._test_pollster( table.TablePollster, 'switch.table', sample.TYPE_GAUGE, 'table') def test_table_pollster_active_entries(self): self._test_pollster( table.TablePollsterActiveEntries, 'switch.table.active.entries', sample.TYPE_GAUGE, 'entry') def test_table_pollster_lookup_packets(self): self._test_pollster( table.TablePollsterLookupPackets, 'switch.table.lookup.packets', sample.TYPE_GAUGE, 'packet') def test_table_pollster_matched_packets(self): self._test_pollster( table.TablePollsterMatchedPackets, 'switch.table.matched.packets', sample.TYPE_GAUGE, 'packet') ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_switch.py0000666000175100017510000000221013236733243027015 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.network.statistics import switch from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestSwitchPollster(statistics._PollsterTestBase): def test_switch_pollster(self): self._test_pollster( switch.SWPollster, 'switch', sample.TYPE_GAUGE, 'switch') def test_switch_pollster_ports(self): self._test_pollster( switch.SwitchPollsterPorts, 'switch.ports', sample.TYPE_GAUGE, 'ports') ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/__init__.py0000666000175100017510000000220513236733243026220 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from ceilometer import service class _PollsterTestBase(base.BaseTestCase): def setUp(self): super(_PollsterTestBase, self).setUp() self.CONF = service.prepare_service([], []) def _test_pollster(self, pollster_class, meter_name, meter_type, meter_unit): pollster = pollster_class(self.CONF) self.assertEqual(pollster.meter_name, meter_name) self.assertEqual(pollster.meter_type, meter_type) self.assertEqual(pollster.meter_unit, meter_unit) ceilometer-10.0.0/ceilometer/tests/unit/network/statistics/test_flow.py0000666000175100017510000000342413236733243026473 0ustar zuulzuul00000000000000# # Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
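# The flow duration is exposed as two separate gauges,
# 'switch.flow.duration_seconds' (unit 's') and
# 'switch.flow.duration_nanoseconds' (unit 'ns'), as the tests below check.
# A tiny illustrative helper (not part of ceilometer) showing how a consumer
# could recombine the pair reported by OpenDaylight, e.g.
# durationSeconds=1828 and durationNanoseconds=397000000 in the OpenDaylight
# driver tests above:
def _total_seconds(duration_seconds, duration_nanoseconds):
    return duration_seconds + duration_nanoseconds / 1e9


assert abs(_total_seconds(1828, 397000000) - 1828.397) < 1e-6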
from ceilometer.network.statistics import flow from ceilometer import sample from ceilometer.tests.unit.network import statistics class TestFlowPollsters(statistics._PollsterTestBase): def test_flow_pollster(self): self._test_pollster( flow.FlowPollster, 'switch.flow', sample.TYPE_GAUGE, 'flow') def test_flow_pollster_duration_seconds(self): self._test_pollster( flow.FlowPollsterDurationSeconds, 'switch.flow.duration_seconds', sample.TYPE_GAUGE, 's') def test_flow_pollster_duration_nanoseconds(self): self._test_pollster( flow.FlowPollsterDurationNanoseconds, 'switch.flow.duration_nanoseconds', sample.TYPE_GAUGE, 'ns') def test_flow_pollster_packets(self): self._test_pollster( flow.FlowPollsterPackets, 'switch.flow.packets', sample.TYPE_CUMULATIVE, 'packet') def test_flow_pollster_bytes(self): self._test_pollster( flow.FlowPollsterBytes, 'switch.flow.bytes', sample.TYPE_CUMULATIVE, 'B') ceilometer-10.0.0/ceilometer/tests/unit/volume/0000775000175100017510000000000013236733440021531 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/volume/__init__.py0000666000175100017510000000000013236733243023633 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/volume/test_cinder.py0000666000175100017510000001536613236733243024424 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
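# The fixtures below build bare classes with type(name, bases, attrs) because
# the pollsters under test only read a handful of attributes from each
# resource.  A minimal illustrative equivalent for a volume; note that
# attribute names containing ':' (such as 'os-vol-tenant-attr:tenant_id')
# can only be reached with getattr():
_FAKE_VOLUME = type('Volume', (object,), {
    u'id': u'd94c18fb-b680-4912-9741-da69ee83c94f',
    u'size': 1,
    u'os-vol-tenant-attr:tenant_id': u'6824974c08974d4db864bbaa6bc08303',
})

assert _FAKE_VOLUME.size == 1
assert (getattr(_FAKE_VOLUME, 'os-vol-tenant-attr:tenant_id') ==
        u'6824974c08974d4db864bbaa6bc08303')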
from ceilometer.polling import manager from ceilometer import service import ceilometer.tests.base as base from ceilometer.volume import cinder VOLUME_LIST = [ type('Volume', (object,), {u'migration_status': None, u'attachments': [ {u'server_id': u'1ae69721-d071-4156-a2bd-b11bb43ec2e3', u'attachment_id': u'f903d95e-f999-4a34-8be7-119eadd9bb4f', u'attached_at': u'2016-07-14T03:55:57.000000', u'host_name': None, u'volume_id': u'd94c18fb-b680-4912-9741-da69ee83c94f', u'device': u'/dev/vdb', u'id': u'd94c18fb-b680-4912-9741-da69ee83c94f'}], u'links': [{ u'href': u'http://fake_link3', u'rel': u'self'}, { u'href': u'http://fake_link4', u'rel': u'bookmark'}], u'availability_zone': u'nova', u'os-vol-host-attr:host': u'test@lvmdriver-1#lvmdriver-1', u'encrypted': False, u'updated_at': u'2016-07-14T03:55:57.000000', u'replication_status': u'disabled', u'snapshot_id': None, u'id': u'd94c18fb-b680-4912-9741-da69ee83c94f', u'size': 1, u'user_id': u'be255bd31eb944578000fc762fde6dcf', u'os-vol-tenant-attr:tenant_id': u'6824974c08974d4db864bbaa6bc08303', u'os-vol-mig-status-attr:migstat': None, u'metadata': {u'readonly': u'False', u'attached_mode': u'rw'}, u'status': u'in-use', u'description': None, u'multiattach': False, u'source_volid': None, u'consistencygroup_id': None, u'os-vol-mig-status-attr:name_id': None, u'name': None, u'bootable': u'false', u'created_at': u'2016-06-23T08:27:45.000000', u'volume_type': u'lvmdriver-1'}) ] SNAPSHOT_LIST = [ type('VolumeSnapshot', (object,), {u'status': u'available', u'os-extended-snapshot-attributes:progress': u'100%', u'description': None, u'os-extended-snapshot-attributes:project_id': u'6824974c08974d4db864bbaa6bc08303', u'size': 1, u'user_id': u'be255bd31eb944578000fc762fde6dcf', u'updated_at': u'2016-10-19T07:56:55.000000', u'id': u'b1ea6783-f952-491e-a4ed-23a6a562e1cf', u'volume_id': u'6f27bc42-c834-49ea-ae75-8d1073b37806', u'metadata': {}, u'created_at': u'2016-10-19T07:56:55.000000', u'name': None}) ] BACKUP_LIST = [ type('VolumeBackup', (object,), {u'status': u'available', u'object_count': 0, u'container': None, u'name': None, u'links': [{ u'href': u'http://fake_urla', u'rel': u'self'}, { u'href': u'http://fake_urlb', u'rel': u'bookmark'}], u'availability_zone': u'nova', u'created_at': u'2016-10-19T06:55:23.000000', u'snapshot_id': None, u'updated_at': u'2016-10-19T06:55:23.000000', u'data_timestamp': u'2016-10-19T06:55:23.000000', u'description': None, u'has_dependent_backups': False, u'volume_id': u'6f27bc42-c834-49ea-ae75-8d1073b37806', u'os-backup-project-attr:project_id': u'6824974c08974d4db864bbaa6bc08303', u'fail_reason': u"", u'is_incremental': False, u'id': u'75a52125-85ff-4a8d-b2aa-580f3b22273f', u'size': 1}) ] class TestVolumeSizePollster(base.BaseTestCase): def setUp(self): super(TestVolumeSizePollster, self).setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeSizePollster(conf) def test_volume_size_pollster(self): volume_size_samples = list( self.pollster.get_samples(self.manager, {}, resources=VOLUME_LIST)) self.assertEqual(1, len(volume_size_samples)) self.assertEqual('volume.size', volume_size_samples[0].name) self.assertEqual(1, volume_size_samples[0].volume) self.assertEqual('6824974c08974d4db864bbaa6bc08303', volume_size_samples[0].project_id) self.assertEqual('d94c18fb-b680-4912-9741-da69ee83c94f', volume_size_samples[0].resource_id) class TestVolumeSnapshotSizePollster(base.BaseTestCase): def setUp(self): super(TestVolumeSnapshotSizePollster, self).setUp() conf = 
service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeSnapshotSize(conf) def test_volume_snapshot_size_pollster(self): volume_snapshot_size_samples = list( self.pollster.get_samples( self.manager, {}, resources=SNAPSHOT_LIST)) self.assertEqual(1, len(volume_snapshot_size_samples)) self.assertEqual('volume.snapshot.size', volume_snapshot_size_samples[0].name) self.assertEqual(1, volume_snapshot_size_samples[0].volume) self.assertEqual('be255bd31eb944578000fc762fde6dcf', volume_snapshot_size_samples[0].user_id) self.assertEqual('6824974c08974d4db864bbaa6bc08303', volume_snapshot_size_samples[0].project_id) self.assertEqual('b1ea6783-f952-491e-a4ed-23a6a562e1cf', volume_snapshot_size_samples[0].resource_id) class TestVolumeBackupSizePollster(base.BaseTestCase): def setUp(self): super(TestVolumeBackupSizePollster, self).setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeBackupSize(conf) def test_volume_backup_size_pollster(self): volume_backup_size_samples = list( self.pollster.get_samples(self.manager, {}, resources=BACKUP_LIST)) self.assertEqual(1, len(volume_backup_size_samples)) self.assertEqual('volume.backup.size', volume_backup_size_samples[0].name) self.assertEqual(1, volume_backup_size_samples[0].volume) self.assertEqual('75a52125-85ff-4a8d-b2aa-580f3b22273f', volume_backup_size_samples[0].resource_id) ceilometer-10.0.0/ceilometer/tests/unit/polling/0000775000175100017510000000000013236733440021666 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/polling/test_manager.py0000666000175100017510000010776113236733243024730 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 Intel corp. # Copyright 2013 eNovance # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
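# ---------------------------------------------------------------------------
# NOTE: illustrative sketch, not part of the upstream test module.  The
# polling_cfg dictionaries that the tests below hand to setup_polling() (via
# the cfg2file() helper) follow the shape of a polling configuration file: a
# top-level 'sources' list whose entries carry a source name, an interval in
# seconds, the meters to poll, static 'resources' and, optionally,
# 'discovery' plugin names.  A self-contained way to write such a config out
# as YAML, with hypothetical names and paths:
_DEMO_POLLING_CFG = {
    'sources': [{
        'name': 'demo_polling',        # hypothetical source name
        'interval': 60,                # seconds between polling cycles
        'meters': ['test'],            # pollster/meter names to run
        'resources': ['test://'],      # static resources handed to pollsters
    }],
}


def _write_demo_polling_yaml(path='/tmp/demo_polling.yaml'):
    """Dump the demo config to a YAML file (sketch, assumes PyYAML)."""
    import yaml  # PyYAML
    with open(path, 'w') as fobj:
        yaml.safe_dump(_DEMO_POLLING_CFG, fobj, default_flow_style=False)
    return path
# ---------------------------------------------------------------------------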
"""Tests for ceilometer agent manager""" import copy import datetime import fixtures import mock from keystoneauth1 import exceptions as ka_exceptions from stevedore import extension from ceilometer.compute import discovery as nova_discover from ceilometer.hardware import discovery from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import sample from ceilometer import service from ceilometer.tests import base def default_test_data(name='test'): return sample.Sample( name=name, type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'Pollster'}) class TestPollster(plugin_base.PollsterBase): test_data = default_test_data() discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) c = copy.deepcopy(self.test_data) c.resource_metadata['resources'] = resources return [c] class PollingException(Exception): pass class TestPollsterBuilder(TestPollster): @classmethod def build_pollsters(cls, conf): return [('builder1', cls(conf)), ('builder2', cls(conf))] class TestManager(base.BaseTestCase): def setUp(self): super(TestManager, self).setUp() self.conf = service.prepare_service([], []) def test_hash_of_set(self): x = ['a', 'b'] y = ['a', 'b', 'a'] z = ['a', 'c'] self.assertEqual(manager.hash_of_set(x), manager.hash_of_set(y)) self.assertNotEqual(manager.hash_of_set(x), manager.hash_of_set(z)) self.assertNotEqual(manager.hash_of_set(y), manager.hash_of_set(z)) def test_load_plugins(self): mgr = manager.AgentManager(0, self.conf) self.assertIsNotNone(list(mgr.extensions)) # Test plugin load behavior based on Node Manager pollsters. @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_normal_plugins(self): mgr = manager.AgentManager(0, self.conf, namespaces=['ipmi']) # 8 pollsters for Node Manager self.assertEqual(12, len(mgr.extensions)) # Skip loading pollster upon ExtensionLoadError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=plugin_base.ExtensionLoadError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) @mock.patch('ceilometer.polling.manager.LOG') def test_load_failed_plugins(self, LOG): # Here we additionally check that namespaces will be converted to the # list if param was not set as a list. 
try: manager.AgentManager(0, self.conf, namespaces='ipmi') except manager.EmptyPollstersList: err_msg = 'Skip loading extension for %s' pollster_names = [ 'power', 'temperature', 'outlet_temperature', 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] calls = [mock.call(err_msg, 'hardware.ipmi.node.%s' % n) for n in pollster_names] LOG.exception.assert_has_calls(calls=calls, any_order=True) # Skip loading pollster upon ImportError @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=ImportError)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_import_error_in_plugin(self): self.assertRaisesRegex( manager.EmptyPollstersList, 'No valid pollsters can be loaded with the startup parameter' ' polling-namespaces.', manager.AgentManager, 0, self.conf, {"namespaces": ['ipmi']}) # Exceptions other than ExtensionLoadError are propagated @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', mock.Mock(side_effect=PollingException)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_exceptional_plugins(self): self.assertRaises(PollingException, manager.AgentManager, 0, self.conf, ['ipmi']) def test_builder(self): @staticmethod def fake_get_ext_mgr(namespace, *args, **kwargs): if 'builder' in namespace: return extension.ExtensionManager.make_test_instance( [ extension.Extension('builder', None, TestPollsterBuilder, None), ] ) else: return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, TestPollster(self.conf)), ] ) with mock.patch.object(manager.AgentManager, '_get_ext_mgr', new=fake_get_ext_mgr): mgr = manager.AgentManager(0, self.conf, namespaces=['central']) self.assertEqual(3, len(mgr.extensions)) for ext in mgr.extensions: self.assertIn(ext.name, ['builder1', 'builder2', 'test']) self.assertIsInstance(ext.obj, TestPollster) class BatchTestPollster(TestPollster): test_data = default_test_data() discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) for resource in resources: c = copy.deepcopy(self.test_data) c.timestamp = datetime.datetime.utcnow().isoformat() c.resource_id = resource c.resource_metadata['resource'] = resource yield c class TestPollsterKeystone(TestPollster): def get_samples(self, manager, cache, resources): # Just try to use keystone, that will raise an exception manager.keystone.projects.list() class TestPollsterPollingException(TestPollster): discovery = 'test' polling_failures = 0 def get_samples(self, manager, cache, resources): func = super(TestPollsterPollingException, self).get_samples sample = func(manager=manager, cache=cache, resources=resources) # Raise polling exception after 2 times self.polling_failures += 1 if self.polling_failures > 2: raise plugin_base.PollsterPermanentError(resources) return sample class TestDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) return self.resources class TestDiscoveryException(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) raise Exception() class BaseAgent(base.BaseTestCase): class Pollster(TestPollster): samples = [] resources = [] test_data = default_test_data() class BatchPollster(BatchTestPollster): samples = [] resources = [] test_data = 
default_test_data() class PollsterAnother(TestPollster): samples = [] resources = [] test_data = default_test_data('testanother') class PollsterKeystone(TestPollsterKeystone): samples = [] resources = [] test_data = default_test_data('testkeystone') class PollsterPollingException(TestPollsterPollingException): samples = [] resources = [] test_data = default_test_data('testpollingexception') class Discovery(TestDiscovery): params = [] resources = [] class DiscoveryAnother(TestDiscovery): params = [] resources = [] @property def group_id(self): return 'another_group' class DiscoveryException(TestDiscoveryException): params = [] def setup_polling(self, poll_cfg=None): name = self.cfg2file(poll_cfg or self.polling_cfg) self.CONF.set_override('cfg_file', name, group='polling') self.mgr.polling_manager = manager.PollingManager(self.CONF) def create_manager(self): return manager.AgentManager(0, self.CONF) def fake_notifier_sample(self, ctxt, event_type, payload): for m in payload['samples']: del m['message_signature'] self.notified_samples.append(m) def setUp(self): super(BaseAgent, self).setUp() self.notified_samples = [] self.notifier = mock.Mock() self.notifier.sample.side_effect = self.fake_notifier_sample self.useFixture(fixtures.MockPatch('oslo_messaging.Notifier', return_value=self.notifier)) self.useFixture(fixtures.MockPatch('keystoneclient.v2_0.client.Client', return_value=mock.Mock())) self.CONF = service.prepare_service([], []) self.CONF.set_override( 'cfg_file', self.path_get('etc/ceilometer/polling_all.yaml'), group='polling' ) self.polling_cfg = { 'sources': [{ 'name': 'test_polling', 'interval': 60, 'meters': ['test'], 'resources': ['test://']}] } def tearDown(self): self.PollsterKeystone.samples = [] self.PollsterKeystone.resources = [] self.PollsterPollingException.samples = [] self.PollsterPollingException.resources = [] self.Pollster.samples = [] self.Pollster.discovery = [] self.PollsterAnother.samples = [] self.PollsterAnother.discovery = [] self.Pollster.resources = [] self.PollsterAnother.resources = [] self.Discovery.params = [] self.DiscoveryAnother.params = [] self.DiscoveryException.params = [] self.Discovery.resources = [] self.DiscoveryAnother.resources = [] super(BaseAgent, self).tearDown() def create_extension_list(self): return [extension.Extension('test', None, None, self.Pollster(self.CONF), ), extension.Extension('testbatch', None, None, self.BatchPollster(self.CONF), ), extension.Extension('testanother', None, None, self.PollsterAnother(self.CONF), ), extension.Extension('testkeystone', None, None, self.PollsterKeystone(self.CONF), ), extension.Extension('testpollingexception', None, None, self.PollsterPollingException(self.CONF), ) ] def create_discoveries(self): return extension.ExtensionManager.make_test_instance( [ extension.Extension( 'testdiscovery', None, None, self.Discovery(self.CONF), ), extension.Extension( 'testdiscoveryanother', None, None, self.DiscoveryAnother(self.CONF), ), extension.Extension( 'testdiscoveryexception', None, None, self.DiscoveryException(self.CONF), ), ], ) class TestPollingAgent(BaseAgent): def setUp(self): super(TestPollingAgent, self).setUp() self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() self.setup_polling() @mock.patch('ceilometer.polling.manager.PollingManager') def test_start(self, poll_manager): self.mgr.setup_polling_tasks = mock.MagicMock() self.mgr.run() poll_manager.assert_called_once_with(self.CONF) self.mgr.setup_polling_tasks.assert_called_once_with() self.mgr.terminate() def 
test_setup_polling_tasks(self): polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) per_task_resources = polling_tasks[60].resources self.assertEqual(1, len(per_task_resources)) self.assertEqual(set(self.polling_cfg['sources'][0]['resources']), set(per_task_resources['test_polling-test'].get({}))) def test_setup_polling_tasks_multiple_interval(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 10, 'meters': ['test'], 'resources': ['test://'], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(2, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertIn(10, polling_tasks.keys()) def test_setup_polling_tasks_mismatch_counter(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 10, 'meters': ['test_invalid'], 'resources': ['invalid://'], }) polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertNotIn(10, polling_tasks.keys()) def test_setup_polling_task_same_interval(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 60, 'meters': ['testanother'], 'resources': ['testanother://'], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) pollsters = polling_tasks.get(60).pollster_matches self.assertEqual(2, len(pollsters)) per_task_resources = polling_tasks[60].resources self.assertEqual(2, len(per_task_resources)) key = 'test_polling-test' self.assertEqual(set(self.polling_cfg['sources'][0]['resources']), set(per_task_resources[key].get({}))) key = 'test_polling_1-testanother' self.assertEqual(set(self.polling_cfg['sources'][1]['resources']), set(per_task_resources[key].get({}))) def _verify_discovery_params(self, expected): self.assertEqual(expected, self.Discovery.params) self.assertEqual(expected, self.DiscoveryAnother.params) self.assertEqual(expected, self.DiscoveryException.params) def _do_test_per_pollster_discovery(self, discovered_resources, static_resources): self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] if static_resources: # just so we can test that static + pre_polling amalgamated # override per_pollster self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) if static_resources: self.assertEqual(set(static_resources + self.DiscoveryAnother.resources), set(self.Pollster.resources)) else: self.assertEqual(set(self.Discovery.resources), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pollster_discovery(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], []) def test_per_pollster_discovery_overridden_by_per_polling_discovery(self): # ensure static+per_source_discovery overrides per_pollster_discovery self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pollster_discovery_duplicated(self): 
self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], []) def test_per_pollster_discovery_overridden_by_duplicated_static(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'dup', 'dup']) def test_per_pollster_discovery_caching(self): # ensure single discovery associated with multiple pollsters # only called once per polling cycle discovered_resources = ['discovered_1', 'discovered_2'] self.Pollster.discovery = 'testdiscovery' self.PollsterAnother.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.polling_cfg['sources'][0]['meters'].append('testanother') self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Discovery.params)) self.assertEqual(discovered_resources, self.Pollster.resources) self.assertEqual(discovered_resources, self.PollsterAnother.resources) def _do_test_per_polling_discovery(self, discovered_resources, static_resources): self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) discovery = self.Discovery.resources + self.DiscoveryAnother.resources # compare resource lists modulo ordering self.assertEqual(set(static_resources + discovery), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_polling_discovery_discovered_only(self): self._do_test_per_polling_discovery(['discovered_1', 'discovered_2'], []) def test_per_polling_discovery_static_only(self): self._do_test_per_polling_discovery([], ['static_1', 'static_2']) def test_per_polling_discovery_discovered_augmented_by_static(self): self._do_test_per_polling_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_polling_discovery_discovered_duplicated_static(self): self._do_test_per_polling_discovery(['discovered_1', 'pud'], ['dup', 'static_1', 'dup']) def test_multiple_pollings_different_static_resources(self): # assert that the individual lists of static and discovered resources # for each polling with a common interval are passed to individual # pollsters matching each polling self.polling_cfg['sources'][0]['resources'] = ['test://'] self.polling_cfg['sources'][0]['discovery'] = ['testdiscovery'] self.polling_cfg['sources'].append({ 'name': 'another_polling', 'interval': 60, 'meters': ['test'], 'resources': ['another://'], 'discovery': ['testdiscoveryanother'], }) self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual([None], self.Discovery.params) self.assertEqual([None], self.DiscoveryAnother.params) self.assertEqual(2, 
len(self.Pollster.samples)) samples = self.Pollster.samples test_resources = ['test://', 'discovered_1', 'discovered_2'] another_resources = ['another://', 'discovered_3', 'discovered_4'] if samples[0][1] == test_resources: self.assertEqual(another_resources, samples[1][1]) elif samples[0][1] == another_resources: self.assertEqual(test_resources, samples[1][1]) else: self.fail('unexpected sample resources %s' % samples) def test_multiple_sources_different_discoverers(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery']}, {'name': 'test_source_2', 'interval': 60, 'meters': ['testanother'], 'discovery': ['testdiscoveryanother']}] self.polling_cfg = {'sources': sources} self.mgr.discoveries = self.create_discoveries() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) self.assertEqual(1, len(self.PollsterAnother.samples)) self.assertEqual(['discovered_3', 'discovered_4'], self.PollsterAnother.resources) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_and_notify_with_resources(self, LOG): self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.info.assert_called_with( 'Polling pollster %(poll)s in the context of %(src)s', {'poll': 'test', 'src': 'test_polling'}) @mock.patch('ceilometer.polling.manager.LOG') def test_skip_polling_and_notify_with_no_resources(self, LOG): self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches['test_polling'])[0] polling_task.poll_and_notify() LOG.debug.assert_called_with( 'Skip pollster %(name)s, no %(p_context)sresources found this ' 'cycle', {'name': pollster.name, 'p_context': ''}) @mock.patch('ceilometer.polling.manager.LOG') def test_skip_polling_polled_resources(self, LOG): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 60, 'meters': ['test'], 'resources': ['test://'], }) self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.debug.assert_called_with( 'Skip pollster %(name)s, no %(p_context)sresources found this ' 'cycle', {'name': 'test', 'p_context': 'new '}) @mock.patch('oslo_utils.timeutils.utcnow') def test_polling_samples_timestamp(self, mock_utc): polled_samples = [] timestamp = '2222-11-22T00:11:22.333333' def fake_send_notification(samples): polled_samples.extend(samples) mock_utc.return_value = datetime.datetime.strptime( timestamp, "%Y-%m-%dT%H:%M:%S.%f") self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task._send_notification = mock.Mock( side_effect=fake_send_notification) polling_task.poll_and_notify() self.assertEqual(timestamp, polled_samples[0]['timestamp']) def test_get_sample_resources(self): polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertTrue(self.Pollster.resources) def test_when_keystone_fail(self): """Test for bug 1316532.""" self.useFixture(fixtures.MockPatch( 
'keystoneclient.v2_0.client.Client', side_effect=ka_exceptions.ClientException)) poll_cfg = { 'sources': [{ 'name': "test_keystone", 'interval': 10, 'meters': ['testkeystone'], 'resources': ['test://'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertFalse(self.PollsterKeystone.samples) self.assertFalse(self.notified_samples) @mock.patch('ceilometer.polling.manager.LOG') @mock.patch('ceilometer.nova_client.LOG') def test_hardware_discover_fail_minimize_logs(self, novalog, baselog): class PollsterHardware(TestPollster): discovery = 'tripleo_overcloud_nodes' class PollsterHardwareAnother(TestPollster): discovery = 'tripleo_overcloud_nodes' self.mgr.extensions.extend([ extension.Extension('testhardware', None, None, PollsterHardware(self.CONF), ), extension.Extension('testhardware2', None, None, PollsterHardwareAnother(self.CONF), ) ]) ext = extension.Extension('tripleo_overcloud_nodes', None, None, discovery.NodesDiscoveryTripleO(self.CONF)) self.mgr.discoveries = (extension.ExtensionManager .make_test_instance([ext])) poll_cfg = { 'sources': [{ 'name': "test_hardware", 'interval': 10, 'meters': ['testhardware', 'testhardware2'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertEqual(1, novalog.exception.call_count) self.assertFalse(baselog.exception.called) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_exception(self, LOG): source_name = 'test_pollingexception' res_list = ['test://'] poll_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'resources': res_list, 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] # 2 samples after 4 pollings, as pollster got disabled upon exception for x in range(0, 4): self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(2, len(samples)) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!'), dict(name=pollster.name, res_list=str(res_list), source=source_name)) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_novalike_exception(self, LOG): source_name = 'test_pollingexception' poll_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] with mock.patch.object(polling_task.manager, 'discover') as disco: # NOTE(gordc): polling error on 3rd poll for __ in range(4): disco.return_value = ( [nova_discover.NovaLikeServer(**{'id': 1})]) self.mgr.interval_task(polling_task) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!'), dict(name=pollster.name, res_list="[]", source=source_name)) def 
test_batching_polled_samples_false(self): self.CONF.set_override('batch_polled_samples', False) self._batching_samples(4, 4) def test_batching_polled_samples_true(self): self.CONF.set_override('batch_polled_samples', True) self._batching_samples(4, 1) def test_batching_polled_samples_default(self): self._batching_samples(4, 1) def _batching_samples(self, expected_samples, call_count): poll_cfg = { 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testbatch'], 'resources': ['alpha', 'beta', 'gamma', 'delta'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'transformers': [], 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(expected_samples, len(samples)) self.assertEqual(call_count, self.notifier.sample.call_count) class TestPollingAgentPartitioned(BaseAgent): def setUp(self): super(TestPollingAgentPartitioned, self).setUp() self.CONF.set_override("backend_url", "zake://", "coordination") self.hashring = mock.MagicMock() self.hashring.belongs_to_self = mock.MagicMock() self.hashring.belongs_to_self.return_value = True self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() self.mgr.hashrings = mock.MagicMock() self.mgr.hashrings.__getitem__.return_value = self.hashring self.setup_polling() def test_discovery_partitioning(self): discovered_resources = ['discovered_1', 'discovered_2'] self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls( [mock.call('discovered_1'), mock.call('discovered_2')]) def test_discovery_partitioning_unhashable(self): discovered_resources = [{'unhashable': True}] self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls( [mock.call('{\'unhashable\': True}')]) def test_static_resources_partitioning(self): static_resources = ['static_1', 'static_2'] static_resources2 = ['static_3', 'static_4'] self.polling_cfg['sources'][0]['resources'] = static_resources self.polling_cfg['sources'].append({ 'name': 'test_polling2', 'interval': 60, 'meters': ['test', 'test2'], 'resources': static_resources2, }) # have one polling without static resources defined self.polling_cfg['sources'].append({ 'name': 'test_polling3', 'interval': 60, 'meters': ['test', 'test2'], 'resources': [], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls([ mock.call('static_1'), mock.call('static_2'), mock.call('static_3'), mock.call('static_4'), ], any_order=True) 
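# ---------------------------------------------------------------------------
# NOTE: illustrative sketch appended for clarity; not part of the upstream
# test module.  TestPollingAgentPartitioned above checks workload
# partitioning: every static or discovered resource is offered to a hash
# ring and only polled when belongs_to_self() answers True for this agent
# (unhashable resources are stringified first, as the
# test_discovery_partitioning_unhashable case expects).  The toy class below
# shows the idea with a plain hash-modulo scheme instead of a real
# coordination-backed hash ring (note the 'backend_url' = 'zake://' override
# in the partitioned tests); every name here is a hypothetical
# simplification.
import hashlib


class _DemoPartitioner(object):
    """Decide whether this agent should poll a given resource."""

    def __init__(self, agent_index, agent_count):
        self.agent_index = agent_index
        self.agent_count = agent_count

    def belongs_to_self(self, resource):
        # str() keeps unhashable resources (e.g. dicts) partitionable.
        digest = hashlib.md5(str(resource).encode('utf-8')).hexdigest()
        return int(digest, 16) % self.agent_count == self.agent_index


# Example: two agents splitting four resources; each resource lands on
# exactly one of them.
_ring_a = _DemoPartitioner(agent_index=0, agent_count=2)
_ring_b = _DemoPartitioner(agent_index=1, agent_count=2)
_resources = ['static_1', 'static_2', 'discovered_1', 'discovered_2']
_mine_a = [r for r in _resources if _ring_a.belongs_to_self(r)]
_mine_b = [r for r in _resources if _ring_b.belongs_to_self(r)]
assert sorted(_mine_a + _mine_b) == sorted(_resources)
# ---------------------------------------------------------------------------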
ceilometer-10.0.0/ceilometer/tests/unit/polling/test_discovery.py0000666000175100017510000002005113236733243025307 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/central/manager.py """ import mock from oslotest import base from ceilometer.hardware import discovery as hardware from ceilometer.polling.discovery import endpoint from ceilometer.polling.discovery import localnode from ceilometer.polling.discovery import tenant as project from ceilometer import service class TestEndpointDiscovery(base.BaseTestCase): def setUp(self): super(TestEndpointDiscovery, self).setUp() CONF = service.prepare_service([], []) CONF.set_override('interface', 'publicURL', group='service_credentials') CONF.set_override('region_name', 'test-region-name', group='service_credentials') self.discovery = endpoint.EndpointDiscovery(CONF) self.manager = mock.MagicMock() self.catalog = (self.manager.keystone.session.auth.get_access. return_value.service_catalog) def test_keystone_called(self): self.discovery.discover(self.manager, param='test-service-type') expected = [mock.call(service_type='test-service-type', interface='publicURL', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls.call_args_list) def test_keystone_called_no_service_type(self): self.discovery.discover(self.manager) expected = [mock.call(service_type=None, interface='publicURL', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls .call_args_list) def test_keystone_called_no_endpoints(self): self.catalog.get_urls.return_value = [] self.assertEqual([], self.discovery.discover(self.manager)) class TestLocalnodeDiscovery(base.BaseTestCase): def setUp(self): super(TestLocalnodeDiscovery, self).setUp() CONF = service.prepare_service([], []) self.discovery = localnode.LocalNodeDiscovery(CONF) self.manager = mock.MagicMock() def test_lockalnode_discovery(self): self.assertEqual(['local_host'], self.discovery.discover(self.manager)) class TestProjectDiscovery(base.BaseTestCase): def prepare_mock_data(self): domain_heat = mock.MagicMock() domain_heat.id = '2f42ab40b7ad4140815ef830d816a16c' domain_heat.name = 'heat' domain_heat.enabled = True domain_heat.links = { u'self': u'http://192.168.1.1/identity/v3/domains/' u'2f42ab40b7ad4140815ef830d816a16c'} domain_default = mock.MagicMock() domain_default.id = 'default' domain_default.name = 'Default' domain_default.enabled = True domain_default.links = { u'self': u'http://192.168.1.1/identity/v3/domains/default'} project_admin = mock.MagicMock() project_admin.id = '2ce92449a23145ef9c539f3327960ce3' project_admin.name = 'admin' project_admin.parent_id = 'default' project_admin.domain_id = 'default' project_admin.is_domain = False project_admin.enabled = True project_admin.links = { u'self': u'http://192.168.4.46/identity/v3/projects/' u'2ce92449a23145ef9c539f3327960ce3'}, project_service = mock.MagicMock() project_service.id = '9bf93b86bca04e3b815f86a5de083adc' project_service.name = 
'service' project_service.parent_id = 'default' project_service.domain_id = 'default' project_service.is_domain = False project_service.enabled = True project_service.links = { u'self': u'http://192.168.4.46/identity/v3/projects/' u'9bf93b86bca04e3b815f86a5de083adc'} project_demo = mock.MagicMock() project_demo.id = '57d96b9af18d43bb9d047f436279b0be' project_demo.name = 'demo' project_demo.parent_id = 'default' project_demo.domain_id = 'default' project_demo.is_domain = False project_demo.enabled = True project_demo.links = { u'self': u'http://192.168.4.46/identity/v3/projects/' u'57d96b9af18d43bb9d047f436279b0be'} self.domains = [domain_heat, domain_default] self.default_domain_projects = [project_admin, project_service] self.heat_domain_projects = [project_demo] def side_effect(self, domain=None): if not domain or domain.name == 'Default': return self.default_domain_projects elif domain.name == 'heat': return self.heat_domain_projects else: return [] def setUp(self): super(TestProjectDiscovery, self).setUp() CONF = service.prepare_service([], []) self.discovery = project.TenantDiscovery(CONF) self.prepare_mock_data() self.manager = mock.MagicMock() self.manager.keystone.projects.list.side_effect = self.side_effect def test_project_discovery(self): self.manager.keystone.domains.list.return_value = self.domains result = self.discovery.discover(self.manager) self.assertEqual(len(result), 3) self.assertEqual(self.manager.keystone.projects.list.call_count, 2) class TestHardwareDiscovery(base.BaseTestCase): class MockInstance(object): addresses = {'ctlplane': [ {'addr': '0.0.0.0', 'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'} ]} id = 'resource_id' image = {'id': 'image_id'} flavor = {'id': 'flavor_id'} expected = { 'resource_id': 'resource_id', 'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0', 'mac_addr': '01-23-45-67-89-ab', 'image_id': 'image_id', 'flavor_id': 'flavor_id', } expected_usm = { 'resource_id': 'resource_id', 'resource_url': ''.join(['snmp://ro_snmp_user:password@0.0.0.0', '?priv_proto=aes192', '&priv_password=priv_pass']), 'mac_addr': '01-23-45-67-89-ab', 'image_id': 'image_id', 'flavor_id': 'flavor_id', } def setUp(self): super(TestHardwareDiscovery, self).setUp() self.CONF = service.prepare_service([], []) self.discovery = hardware.NodesDiscoveryTripleO(self.CONF) self.discovery.nova_cli = mock.MagicMock() self.manager = mock.MagicMock() def test_hardware_discovery(self): self.discovery.nova_cli.instance_get_all.return_value = [ self.MockInstance()] resources = self.discovery.discover(self.manager) self.assertEqual(1, len(resources)) self.assertEqual(self.expected, resources[0]) def test_hardware_discovery_without_flavor(self): instance = self.MockInstance() instance.flavor = {} self.discovery.nova_cli.instance_get_all.return_value = [instance] resources = self.discovery.discover(self.manager) self.assertEqual(0, len(resources)) def test_hardware_discovery_usm(self): self.CONF.set_override('readonly_user_priv_proto', 'aes192', group='hardware') self.CONF.set_override('readonly_user_priv_password', 'priv_pass', group='hardware') self.discovery.nova_cli.instance_get_all.return_value = [ self.MockInstance()] resources = self.discovery.discover(self.manager) self.assertEqual(self.expected_usm, resources[0]) ceilometer-10.0.0/ceilometer/tests/unit/polling/__init__.py0000666000175100017510000000000013236733243023770 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/publisher/0000775000175100017510000000000013236733440022217 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_gnocchi.py0000666000175100017510000006101213236733243025245 0ustar zuulzuul00000000000000# # Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid import fixtures from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import exceptions as ka_exceptions import mock from oslo_config import fixture as config_fixture from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import netutils from oslo_utils import timeutils import requests import six from stevedore import extension import testscenarios from ceilometer.event import models from ceilometer.publisher import gnocchi from ceilometer import sample from ceilometer import service as ceilometer_service from ceilometer.tests import base load_tests = testscenarios.load_tests_apply_scenarios INSTANCE_DELETE_START = models.Event( event_type=u'compute.instance.delete.start', traits=[models.Trait('state', 1, u'active'), models.Trait( 'user_id', 1, u'1e3ce043029547f1a61c1996d1a531a2'), models.Trait('service', 1, u'compute'), models.Trait('disk_gb', 2, 0), models.Trait('instance_type', 1, u'm1.tiny'), models.Trait('tenant_id', 1, u'7c150a59fe714e6f9263774af9688f0e'), models.Trait('root_gb', 2, 0), models.Trait('ephemeral_gb', 2, 0), models.Trait('instance_type_id', 2, 2), models.Trait('vcpus', 2, 1), models.Trait('memory_mb', 2, 512), models.Trait( 'instance_id', 1, u'9f9d01b9-4a58-4271-9e27-398b21ab20d1'), models.Trait('host', 1, u'vagrant-precise'), models.Trait( 'request_id', 1, u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39'), models.Trait('project_id', 1, u'7c150a59fe714e6f9263774af9688f0e'), models.Trait('launched_at', 4, '2012-05-08T20:23:47')], raw={}, generated='2012-05-08T20:24:14.824743', message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', ) IMAGE_DELETE_START = models.Event( event_type=u'image.delete', traits=[models.Trait(u'status', 1, u'deleted'), models.Trait(u'deleted_at', 1, u'2016-11-04T04:25:56Z'), models.Trait(u'user_id', 1, u'e97ef33a20ed4843b520d223f3cc33d4'), models.Trait(u'name', 1, u'cirros'), models.Trait(u'service', 1, u'image.localhost'), models.Trait( u'resource_id', 1, u'dc337359-de70-4044-8e2c-80573ba6e577'), models.Trait(u'created_at', 1, u'2016-11-04T04:24:36Z'), models.Trait( u'project_id', 1, u'e97ef33a20ed4843b520d223f3cc33d4'), models.Trait(u'size', 1, u'13287936')], raw={}, generated=u'2016-11-04T04:25:56.493820', message_id=u'7f5280f7-1d10-46a5-ba58-4d5508e49f99' ) VOLUME_DELETE_START = models.Event( event_type=u'volume.delete.start', traits=[models.Trait(u'availability_zone', 1, u'nova'), models.Trait(u'created_at', 1, u'2016-11-28T13:19:53+00:00'), models.Trait(u'display_name', 1, u'vol-001'), models.Trait( u'host', 1, u'zhangguoqing-dev@lvmdriver-1#lvmdriver-1'), models.Trait( u'project_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait(u'replication_status', 1, u'disabled'), models.Trait( u'request_id', 1, 
u'req-f44df096-50d4-4211-95ea-64be6f5e4f60'), models.Trait( u'resource_id', 1, u'6cc6e7dd-d17d-460f-ae79-7e08a216ce96'), models.Trait( u'service', 1, u'volume.zhangguoqing-dev@lvmdriver-1'), models.Trait(u'size', 1, u'1'), models.Trait(u'status', 1, u'deleting'), models.Trait(u'tenant_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait(u'type', 1, u'af6271fa-13c4-44e6-9246-754ce9dc7df8'), models.Trait(u'user_id', 1, u'819bbd28f5374506b8502521c89430b5')], raw={}, generated='2016-11-28T13:42:15.484674', message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', ) FLOATINGIP_DELETE_END = models.Event( event_type=u'floatingip.delete.end', traits=[models.Trait(u'service', 1, u'network.zhangguoqing-dev'), models.Trait( u'project_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait( u'request_id', 1, 'req-443ddb77-31f7-41fe-abbf-921107dd9f00'), models.Trait( u'resource_id', 1, u'705e2c08-08e8-45cb-8673-5c5be955569b'), models.Trait(u'tenant_id', 1, u'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait(u'user_id', 1, u'819bbd28f5374506b8502521c89430b5')], raw={}, generated='2016-11-29T09:25:55.474710', message_id=u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4' ) class PublisherTest(base.BaseTestCase): def setUp(self): super(PublisherTest, self).setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(config_fixture.Config(conf)) self.resource_id = str(uuid.uuid4()) self.samples = [sample.Sample( name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_id=self.resource_id, resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } ), sample.Sample( name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id=self.resource_id, resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }, ), ] ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.ks_client = ks_client def test_config_load(self): url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) names = [rd.cfg['resource_type'] for rd in d.resources_definition] self.assertIn('instance', names) self.assertIn('volume', names) def test_match(self): resource = { 'metrics': ['image', 'image.size', 'image.download', 'image.serve'], 'attributes': {'container_format': 'resource_metadata.container_format', 'disk_format': 'resource_metadata.disk_format', 'name': 'resource_metadata.name'}, 'event_delete': 'image.delete', 'event_attributes': {'id': 'resource_id'}, 'resource_type': 'image'} plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait.trait_plugin') rd = gnocchi.ResourcesDefinition( resource, "low", plugin_manager) operation = rd.event_match("image.delete") self.assertEqual('delete', operation) def test_metric_match(self): pub = gnocchi.GnocchiPublisher(self.conf.conf, netutils.urlsplit("gnocchi://")) self.assertIn('image.size', pub.metric_map['image.size'].metrics) @mock.patch('ceilometer.publisher.gnocchi.LOG') def test_broken_config_load(self, mylog): contents = [("---\n" 
"resources:\n" " - resource_type: foobar\n"), ("---\n" "resources:\n" " - resource_type: 0\n"), ("---\n" "resources:\n" " - sample_types: ['foo', 'bar']\n"), ("---\n" "resources:\n" " - sample_types: foobar\n" " - resource_type: foobar\n"), ] for content in contents: if six.PY3: content = content.encode('utf-8') temp = fileutils.write_to_tempfile(content=content, prefix='gnocchi_resources', suffix='.yaml') self.addCleanup(os.remove, temp) url = netutils.urlsplit( "gnocchi://?resources_definition_file=" + temp) d = gnocchi.GnocchiPublisher(self.conf.conf, url) self.assertTrue(mylog.error.called) self.assertEqual(0, len(d.resources_definition)) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '._if_not_cached', mock.Mock()) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '.batch_measures') def _do_test_activity_filter(self, expected_measures, fake_batch): url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) d.publish_samples(self.samples) self.assertEqual(1, len(fake_batch.mock_calls)) measures = fake_batch.mock_calls[0][1][0] self.assertEqual( expected_measures, sum(len(m) for rid in measures for m in measures[rid].values())) def test_activity_filter_match_project_id(self): self.samples[0].project_id = ( 'a2d42c23-d518-46b6-96ab-3fba2e146859') self._do_test_activity_filter(1) @mock.patch('ceilometer.publisher.gnocchi.LOG') def test_activity_gnocchi_project_not_found(self, logger): self.ks_client.projects.find.side_effect = ka_exceptions.NotFound self._do_test_activity_filter(2) logger.warning.assert_called_with('filtered project not found in ' 'keystone, ignoring the ' 'filter_project option') def test_activity_filter_match_swift_event(self): self.samples[0].name = 'storage.api.request' self.samples[0].resource_id = 'a2d42c23-d518-46b6-96ab-3fba2e146859' self._do_test_activity_filter(1) def test_activity_filter_nomatch(self): self._do_test_activity_filter(2) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '.batch_measures') def test_unhandled_meter(self, fake_batch): samples = [sample.Sample( name='unknown.meter', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id='randomid', resource_metadata={} )] url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) d.publish_samples(samples) self.assertEqual(0, len(fake_batch.call_args[0][1])) class MockResponse(mock.NonCallableMock): def __init__(self, code): text = {500: 'Internal Server Error', 404: 'Not Found', 204: 'Created', 409: 'Conflict', }.get(code) super(MockResponse, self).__init__(spec=requests.Response, status_code=code, text=text) class PublisherWorkflowTest(base.BaseTestCase, testscenarios.TestWithScenarios): sample_scenarios = [ ('disk.root.size', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }, ), measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': 2 }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ 'host': 'foo', 'image_ref': 'imageref!', 'flavor_id': 1234, 'display_name': 'myinstance', }, metric_names=[ 'disk.root.size', 
'disk.ephemeral.size', 'memory', 'vcpus', 'memory.usage', 'memory.resident', 'memory.swap.in', 'memory.swap.out', 'memory.bandwidth.total', 'memory.bandwidth.local', 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', 'cpu_l3_cache', 'perf.cpu.cycles', 'perf.instructions', 'perf.cache.references', 'perf.cache.misses', 'disk.read.requests.rate', 'disk.write.requests', 'disk.write.requests.rate', 'disk.read.bytes', 'disk.read.bytes.rate', 'disk.write.bytes', 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', 'disk.capacity', 'disk.allocation', 'disk.usage', 'compute.instance.booting.time'], resource_type='instance')), ('hardware.ipmi.node.power', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", name='hardware.ipmi.node.power', unit='W', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_metadata={ 'useless': 'not_used', }, ), measures_attributes=[{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': 2 }], postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ }, metric_names=[ 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', 'hardware.ipmi.node.inlet_temperature', 'hardware.ipmi.node.outlet_temperature', 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' ], resource_type='ipmi')), ] default_workflow = dict(resource_exists=True, post_measure_fail=False, create_resource_fail=False, create_resource_race=False, update_resource_fail=False, retry_post_measures_fail=False) workflow_scenarios = [ ('normal_workflow', {}), ('new_resource', dict(resource_exists=False)), ('new_resource_compat', dict(resource_exists=False)), ('new_resource_fail', dict(resource_exists=False, create_resource_fail=True)), ('new_resource_race', dict(resource_exists=False, create_resource_race=True)), ('resource_update_fail', dict(update_resource_fail=True)), ('retry_fail', dict(resource_exists=False, retry_post_measures_fail=True)), ('measure_fail', dict(post_measure_fail=True)), ] @classmethod def generate_scenarios(cls): workflow_scenarios = [] for name, wf_change in cls.workflow_scenarios: wf = cls.default_workflow.copy() wf.update(wf_change) workflow_scenarios.append((name, wf)) cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, workflow_scenarios) def setUp(self): super(PublisherWorkflowTest, self).setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(config_fixture.Config(conf)) ks_client = mock.Mock() ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.ks_client = ks_client @mock.patch('gnocchiclient.v1.client.Client') def test_event_workflow(self, fakeclient_cls): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value fakeclient.resource.search.side_effect = [ [{"id": "b26268d6-8bb5-11e6-baff-00224d8226cd", "type": "instance_disk", "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], [{"id": "b1c7544a-8bb5-11e6-850e-00224d8226cd", "type": "instance_network_interface", "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], ] 
search_params = { '=': {'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1'} } now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ mock.call.resource.search('instance_disk', search_params), mock.call.resource.search('instance_network_interface', search_params), mock.call.resource.update( 'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1', {'ended_at': now.isoformat()}), mock.call.resource.update( 'instance_disk', 'b26268d6-8bb5-11e6-baff-00224d8226cd', {'ended_at': now.isoformat()}), mock.call.resource.update( 'instance_network_interface', 'b1c7544a-8bb5-11e6-850e-00224d8226cd', {'ended_at': now.isoformat()}), mock.call.resource.update( 'image', 'dc337359-de70-4044-8e2c-80573ba6e577', {'ended_at': now.isoformat()}), mock.call.resource.update( 'volume', '6cc6e7dd-d17d-460f-ae79-7e08a216ce96', {'ended_at': now.isoformat()}), mock.call.resource.update( 'network', '705e2c08-08e8-45cb-8673-5c5be955569b', {'ended_at': now.isoformat()}) ] self.publisher.publish_events([INSTANCE_DELETE_START, IMAGE_DELETE_START, VOLUME_DELETE_START, FLOATINGIP_DELETE_END]) self.assertEqual(8, len(fakeclient.mock_calls)) for call in expected_calls: self.assertIn(call, fakeclient.mock_calls) @mock.patch('ceilometer.publisher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test_workflow(self, fakeclient_cls, logger): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value resource_id = self.sample.resource_id.replace("/", "_") metric_name = self.sample.name gnocchi_id = uuid.uuid4() expected_calls = [ mock.call.metric.batch_resources_metrics_measures( {resource_id: {metric_name: self.measures_attributes}}, create_metrics=True) ] expected_debug = [ mock.call('filtered project found: %s', 'a2d42c23-d518-46b6-96ab-3fba2e146859'), ] measures_posted = False batch_side_effect = [] if self.post_measure_fail: batch_side_effect += [Exception('boom!')] elif not self.resource_exists: batch_side_effect += [ gnocchi_exc.BadRequest( 400, {"cause": "Unknown resources", 'detail': [{ 'resource_id': gnocchi_id, 'original_resource_id': resource_id}]})] attributes = self.postable_attributes.copy() attributes.update(self.patchable_attributes) attributes['id'] = self.sample.resource_id attributes['metrics'] = dict((metric_name, {}) for metric_name in self.metric_names) for k, v in six.iteritems(attributes['metrics']): if k == 'disk.root.size': v['unit'] = 'GB' continue if k == 'hardware.ipmi.node.power': v['unit'] = 'W' continue expected_calls.append(mock.call.resource.create( self.resource_type, attributes)) if self.create_resource_fail: fakeclient.resource.create.side_effect = [Exception('boom!')] elif self.create_resource_race: fakeclient.resource.create.side_effect = [ gnocchi_exc.ResourceAlreadyExists(409)] else: # not resource_exists expected_debug.append(mock.call( 'Resource %s created', self.sample.resource_id)) if not self.create_resource_fail: expected_calls.append( mock.call.metric.batch_resources_metrics_measures( {resource_id: {metric_name: self.measures_attributes}}, create_metrics=True) ) if self.retry_post_measures_fail: batch_side_effect += [Exception('boom!')] else: measures_posted = True else: measures_posted = True if measures_posted: batch_side_effect += [None] expected_debug.append( mock.call("%d measures posted against %d metrics through %d " "resources", len(self.measures_attributes), 1, 1) ) if self.patchable_attributes: expected_calls.append(mock.call.resource.update( 
self.resource_type, resource_id, self.patchable_attributes)) if self.update_resource_fail: fakeclient.resource.update.side_effect = [Exception('boom!')] else: expected_debug.append(mock.call( 'Resource %s updated', self.sample.resource_id)) batch = fakeclient.metric.batch_resources_metrics_measures batch.side_effect = batch_side_effect self.publisher.publish_samples([self.sample]) # Check that the last log message is the expected one if (self.post_measure_fail or self.create_resource_fail or self.retry_post_measures_fail or (self.update_resource_fail and self.patchable_attributes)): logger.error.assert_called_with('boom!', exc_info=True) else: self.assertEqual(0, logger.error.call_count) self.assertEqual(expected_calls, fakeclient.mock_calls) self.assertEqual(expected_debug, logger.debug.mock_calls) PublisherWorkflowTest.generate_scenarios() ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_messaging_publisher.py0000666000175100017510000003477713236733243027707 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/messaging.py """ import datetime import uuid import mock import oslo_messaging from oslo_messaging._drivers import impl_kafka as kafka_driver from oslo_utils import netutils import testscenarios.testcase from ceilometer.event import models as event from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer import service from ceilometer.tests import base as tests_base class BasePublisherTestCase(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=datetime.datetime.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_sample_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super(BasePublisherTestCase, self).setUp() 
self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) class NotifierOnlyPublisherTest(BasePublisherTestCase): @mock.patch('oslo_messaging.Notifier') def test_publish_topic_override(self, notifier): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://?topic=custom_topic')) notifier.assert_called_with(mock.ANY, topics=['custom_topic'], driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://?topic=custom_event_topic')) notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'], driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) @mock.patch('ceilometer.messaging.get_transport') def test_publish_other_host(self, cgt): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234')) cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234') msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234')) cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234') @mock.patch('ceilometer.messaging.get_transport') def test_publish_other_host_vhost_and_query(self, cgt): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo' '?driver=amqp&amqp_auto_delete=true')) cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo' '?amqp_auto_delete=true') msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo' '?driver=amqp&amqp_auto_delete=true')) cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo' '?amqp_auto_delete=true') @mock.patch('ceilometer.messaging.get_transport') def test_publish_with_none_rabbit_driver(self, cgt): sample_publisher = msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka')) cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092') transport = oslo_messaging.get_transport(self.CONF, 'kafka://127.0.0.1:9092') self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(sample_publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, sample_publisher.publish_samples, self.test_sample_data) self.assertEqual(0, len(sample_publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with('metering', mock.ANY) event_publisher = msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka')) cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092') with mock.patch.object(event_publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, event_publisher.publish_events, self.test_event_data) self.assertEqual(0, len(event_publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with('event', mock.ANY) class TestPublisher(testscenarios.testcase.WithScenarios, BasePublisherTestCase): scenarios = [ ('notifier', dict(protocol="notifier", publisher_cls=msg_publisher.SampleNotifierPublisher, test_data=BasePublisherTestCase.test_sample_data, pub_func='publish_samples', attr='source')), ('event_notifier', dict(protocol="notifier", publisher_cls=msg_publisher.EventNotifierPublisher, test_data=BasePublisherTestCase.test_event_data, pub_func='publish_events', 
attr='event_type')), ] def setUp(self): super(TestPublisher, self).setUp() self.topic = (self.CONF.publisher_notifier.event_topic if self.pub_func == 'publish_events' else self.CONF.publisher_notifier.metering_topic) class TestPublisherPolicy(TestPublisher): @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_no_policy(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_block(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=default' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_incorrect(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=notexist' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.warning.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) class TestPublisherPolicyReactions(TestPublisher): def test_published_with_policy_drop_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=drop' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.assert_called_once_with( self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down_up(self): self.rpc_unreachable = True publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = 
side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.side_effect = mock.MagicMock() getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(0, len(publisher.local_queue)) topic = self.topic expected = [mock.call(topic, mock.ANY), mock.call(topic, mock.ANY), mock.call(topic, mock.ANY)] self.assertEqual(expected, fake_send.mock_calls) def test_published_with_policy_sized_queue_and_rpc_down(self): publisher = self.publisher_cls(self.CONF, netutils.urlsplit( '%s://?policy=queue&max_queue_length=3' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 5): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(3, len(publisher.local_queue)) self.assertEqual( 'test-2', publisher.local_queue[0][1][0][self.attr] ) self.assertEqual( 'test-3', publisher.local_queue[1][1][0][self.attr] ) self.assertEqual( 'test-4', publisher.local_queue[2][1][0][self.attr] ) def test_published_with_policy_default_sized_queue_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 2000): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual( 'test-976', publisher.local_queue[0][1][0][self.attr] ) self.assertEqual( 'test-1999', publisher.local_queue[1023][1][0][self.attr] ) ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_file.py0000666000175100017510000001055413236733243024557 0ustar zuulzuul00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for ceilometer/publisher/file.py """ import datetime import logging.handlers import os import tempfile from oslo_utils import netutils from oslotest import base from ceilometer.publisher import file from ceilometer import sample from ceilometer import service class TestFilePublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super(TestFilePublisher, self).setUp() self.CONF = service.prepare_service([], []) def test_file_publisher_maxbytes(self): # Test valid configurations tempdir = tempfile.mkdtemp() name = '%s/log_file' % tempdir parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' % name) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([50, name, 3], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # The rotating file gets created since only allow 50 bytes. self.assertTrue(os.path.exists('%s.1' % name)) def test_file_publisher(self): # Test missing max bytes, backup count configurations tempdir = tempfile.mkdtemp() name = '%s/log_file_plain' % tempdir parsed_url = netutils.urlsplit('file://%s' % name) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([0, name, 0], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # Test the content is corrected saved in the file self.assertTrue(os.path.exists(name)) with open(name, 'r') as f: content = f.read() for sample_item in self.test_data: self.assertIn(sample_item.id, content) self.assertIn(sample_item.timestamp, content) def test_file_publisher_invalid(self): # Test invalid max bytes, backup count configurations tempdir = tempfile.mkdtemp() parsed_url = netutils.urlsplit( 'file://%s/log_file_bad' '?max_bytes=yus&backup_count=5y' % tempdir) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) self.assertIsNone(publisher.publisher_logger) ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_utils.py0000666000175100017510000001437213236733243025002 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/utils.py """ import json from oslotest import base from ceilometer.publisher import utils class TestSignature(base.BaseTestCase): def test_compute_signature_change_key(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_change_value(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_same(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 sig2 = utils.compute_signature(data, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_use_configured_secret(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') sig2 = utils.compute_signature(data, 'different-value') self.assertNotEqual(sig1, sig2) def test_verify_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unsigned(self): data = {'a': 'A', 'b': 'B'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_incorrect(self): data = {'a': 'A', 'b': 'B', 'message_signature': 'Not the same'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_invalid_encoding(self): data = {'a': 'A', 'b': 'B', 'message_signature': ''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unicode(self): data = {'a': 'A', 'b': 'B', 'message_signature': u''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested_json(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'c': ('c',), 'd': ['d'] }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_unicode_symbols(self): data = {u'a\xe9\u0437': 'A', 'b': u'B\xe9\u0437' } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_no_secret(self): data = {'a': 'A', 'b': 'B'} self.assertTrue(utils.verify_signature(data, '')) class TestUtils(base.BaseTestCase): def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } 
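        # Illustration of the expected flattening with a custom separator:
        # recursive_keypairs(data, '.') should yield ('a', 'A'), ('b', 'B'),
        # ('nested.a', 'A') and ('nested.b', 'B'), as asserted below.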
separator = '.' pairs = list(utils.recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) ceilometer-10.0.0/ceilometer/tests/unit/publisher/__init__.py0000666000175100017510000000000013236733243024321 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_http.py0000666000175100017510000002435513236733243024623 0ustar zuulzuul00000000000000# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/http.py """ import datetime import mock from oslotest import base import requests from six.moves.urllib import parse as urlparse import uuid from ceilometer.event import models as event from ceilometer.publisher import http from ceilometer import sample from ceilometer import service class TestHttpPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] event_data = [event.Event( message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=datetime.datetime.utcnow().isoformat(), traits=[], raw={'payload': {'some': 'aa'}}) for i in range(3)] def setUp(self): super(TestHttpPublisher, self).setUp() self.CONF = service.prepare_service([], []) def test_http_publisher_config(self): """Test publisher config parameters.""" # invalid hostname, the given url, results in an empty hostname parsed_url = urlparse.urlparse('http:/aaa.bb/path') self.assertRaises(ValueError, http.HttpPublisher, self.CONF, parsed_url) # invalid port parsed_url = urlparse.urlparse('http://aaa:bb/path') self.assertRaises(ValueError, http.HttpPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) # By default, timeout and retry_count should be set to 5 and 2 # respectively 
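        # The options ride on the publisher URL itself; for illustration only
        # (standard-library behaviour, not necessarily how HttpPublisher
        # parses it internally):
        #   urlparse.parse_qs('timeout=19&max_retries=4')
        #   -> {'timeout': ['19'], 'max_retries': ['4']}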
self.assertEqual(5, publisher.timeout) self.assertEqual(2, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'timeout=19&max_retries=4') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(19, publisher.timeout) self.assertEqual(4, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'timeout=19') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(19, publisher.timeout) self.assertEqual(2, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'max_retries=6') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(5, publisher.timeout) self.assertEqual(6, publisher.max_retries) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_samples(self, thelog): """Test publisher post.""" parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) self.assertEqual(1, m_req.call_count) self.assertFalse(thelog.exception.called) res = requests.Response() res.status_code = 401 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) self.assertEqual(1, m_req.call_count) self.assertTrue(thelog.exception.called) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_events(self, thelog): """Test publisher post.""" parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events(self.event_data) self.assertEqual(1, m_req.call_count) self.assertFalse(thelog.exception.called) res = requests.Response() res.status_code = 401 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events(self.event_data) self.assertEqual(1, m_req.call_count) self.assertTrue(thelog.exception.called) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_empty_data(self, thelog): parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events([]) self.assertEqual(0, m_req.call_count) self.assertTrue(thelog.debug.called) def _post_batch_control_test(self, method, data, batch): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 
'batch=%s' % batch) publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: getattr(publisher, method)(data) self.assertEqual(1 if batch else 3, post.call_count) def test_post_batch_sample(self): self._post_batch_control_test('publish_samples', self.sample_data, 1) def test_post_no_batch_sample(self): self._post_batch_control_test('publish_samples', self.sample_data, 0) def test_post_batch_event(self): self._post_batch_control_test('publish_events', self.event_data, 1) def test_post_no_batch_event(self): self._post_batch_control_test('publish_events', self.event_data, 0) def test_post_verify_ssl_default(self): parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertTrue(post.call_args[1]['verify']) def test_post_verify_ssl_True(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'verify_ssl=True') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertTrue(post.call_args[1]['verify']) def test_post_verify_ssl_False(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'verify_ssl=False') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertFalse(post.call_args[1]['verify']) def test_post_verify_ssl_path(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'verify_ssl=/path/to/cert.crt') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual('/path/to/cert.crt', post.call_args[1]['verify']) def test_post_basic_auth(self): parsed_url = urlparse.urlparse( 'http://alice:l00kingGla$$@localhost:90/path1?') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual(('alice', 'l00kingGla$$'), post.call_args[1]['auth']) def test_post_client_cert_auth(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'clientcert=/path/to/cert.crt&' 'clientkey=/path/to/cert.key') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual(('/path/to/cert.crt', '/path/to/cert.key'), post.call_args[1]['cert']) def test_post_raw_only(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?raw_only=1') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_events(self.event_data) self.assertEqual( '[{"some": "aa"}, {"some": "aa"}, {"some": "aa"}]', post.call_args[1]['data']) ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_udp.py0000666000175100017510000001452613236733243024433 0ustar zuulzuul00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/udp.py """ import datetime import socket import mock import msgpack from oslo_utils import netutils from oslotest import base from ceilometer.publisher import udp from ceilometer.publisher import utils from ceilometer import sample from ceilometer import service COUNTER_SOURCE = 'testsource' class TestUDPPublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] @staticmethod def _make_fake_socket(published): def _fake_socket_socket(family, type): def record_data(msg, dest): published.append((msg, dest)) udp_socket = mock.Mock() udp_socket.sendto = record_data return udp_socket return _fake_socket_socket def setUp(self): super(TestUDPPublisher, self).setUp() self.CONF = service.prepare_service([], []) self.CONF.publisher.telemetry_secret = 'not-so-secret' def _check_udp_socket(self, url, expected_addr_family): with mock.patch.object(socket, 'socket') as mock_socket: udp.UDPPublisher(self.CONF, netutils.urlsplit(url)) mock_socket.assert_called_with(expected_addr_family, socket.SOCK_DGRAM) def test_publisher_udp_socket_ipv4(self): self._check_udp_socket('udp://127.0.0.1:4952', socket.AF_INET) def test_publisher_udp_socket_ipv6(self): self._check_udp_socket('udp://[::1]:4952', socket.AF_INET6) def test_publisher_udp_socket_ipv4_hostname(self): host = "ipv4.google.com" try: socket.getaddrinfo(host, None, socket.AF_INET, socket.SOCK_DGRAM) except socket.gaierror: self.skipTest("cannot resolve not running test") url = "udp://"+host+":4952" self._check_udp_socket(url, socket.AF_INET) def test_publisher_udp_socket_ipv6_hostname(self): host = "ipv6.google.com" try: socket.getaddrinfo(host, None, socket.AF_INET6, socket.SOCK_DGRAM) except socket.gaierror: self.skipTest("cannot resolve not running test") url = "udp://"+host+":4952" self._check_udp_socket(url, socket.AF_INET6) def test_published(self): 
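        # Publish the five test samples through the fake socket and verify
        # that each msgpack-encoded datagram is sent to ('somehost', 4952)
        # and decodes back to the corresponding signed counter dict.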
self.data_sent = [] with mock.patch('socket.socket', self._make_fake_socket(self.data_sent)): publisher = udp.UDPPublisher( self.CONF, netutils.urlsplit('udp://somehost')) publisher.publish_samples(self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data, dest in self.data_sent: counter = msgpack.loads(data, encoding="utf-8") sent_counters.append(counter) # Check destination self.assertEqual(('somehost', 4952), dest) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret") for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) @staticmethod def _raise_ioerror(*args): raise IOError def _make_broken_socket(self, family, type): udp_socket = mock.Mock() udp_socket.sendto = self._raise_ioerror return udp_socket def test_publish_error(self): with mock.patch('socket.socket', self._make_broken_socket): publisher = udp.UDPPublisher( self.CONF, netutils.urlsplit('udp://localhost')) publisher.publish_samples(self.test_data) ceilometer-10.0.0/ceilometer/tests/unit/publisher/test_zaqar.py0000666000175100017510000001064113236733243024753 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
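"""Tests for ceilometer/publisher/zaqar.py """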
import datetime import mock from oslotest import base from six.moves.urllib import parse as urlparse import uuid from ceilometer.event import models as event from ceilometer.publisher import zaqar from ceilometer import sample from ceilometer import service class TestZaqarPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] event_data = [event.Event( message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=datetime.datetime.utcnow().isoformat(), traits=[], raw={'payload': {'some': 'aa'}}) for i in range(3)] def setUp(self): super(TestZaqarPublisher, self).setUp() self.CONF = service.prepare_service([], []) def test_zaqar_publisher_config(self): """Test publisher config parameters.""" parsed_url = urlparse.urlparse('zaqar://') self.assertRaises(ValueError, zaqar.ZaqarPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('zaqar://?queue=foo&ttl=bar') self.assertRaises(ValueError, zaqar.ZaqarPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('zaqar://?queue=foo&ttl=60') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) self.assertEqual(60, publisher.ttl) parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) self.assertEqual(3600, publisher.ttl) self.assertEqual('foo', publisher.queue_name) @mock.patch('zaqarclient.queues.v2.queues.Queue') def test_zaqar_post_samples(self, mock_queue): """Test publisher post.""" parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) mock_post = mock.Mock() mock_queue.return_value = mock_post publisher.publish_samples(self.sample_data) mock_queue.assert_called_once_with(mock.ANY, 'foo') self.assertEqual( 3, len(mock_post.post.call_args_list[0][0][0])) self.assertEqual( mock_post.post.call_args_list[0][0][0][0]['body'], self.sample_data[0].as_dict()) @mock.patch('zaqarclient.queues.v2.queues.Queue') def test_zaqar_post_events(self, mock_queue): """Test publisher post.""" parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) mock_post = mock.Mock() mock_queue.return_value = mock_post publisher.publish_events(self.event_data) mock_queue.assert_called_once_with(mock.ANY, 'foo') self.assertEqual( 3, len(mock_post.post.call_args_list[0][0][0])) self.assertEqual( mock_post.post.call_args_list[0][0][0][0]['body'], self.event_data[0].serialize()) ceilometer-10.0.0/ceilometer/tests/unit/test_polling.py0000666000175100017510000000712413236733243023306 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.polling import manager from ceilometer import service from ceilometer.tests import base class PollingTestCase(base.BaseTestCase): def setUp(self): super(PollingTestCase, self).setUp() self.CONF = service.prepare_service([], []) self.poll_cfg = {'sources': [{'name': 'test_source', 'interval': 600, 'meters': ['a']}]} def _build_and_set_new_polling(self): name = self.cfg2file(self.poll_cfg) self.CONF.set_override('cfg_file', name, group='polling') def test_no_name(self): del self.poll_cfg['sources'][0]['name'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_no_interval(self): del self.poll_cfg['sources'][0]['interval'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_invalid_string_interval(self): self.poll_cfg['sources'][0]['interval'] = 'string' self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_get_interval(self): self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) source = poll_manager.sources[0] self.assertEqual(600, source.get_interval()) def test_invalid_resources(self): self.poll_cfg['sources'][0]['resources'] = {'invalid': 1} self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_resources(self): resources = ['test1://', 'test2://'] self.poll_cfg['sources'][0]['resources'] = resources self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) self.assertEqual(resources, poll_manager.sources[0].resources) def test_no_resources(self): self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) self.assertEqual(0, len(poll_manager.sources[0].resources)) def test_check_meters_include_exclude_same(self): self.poll_cfg['sources'][0]['meters'] = ['a', '!a'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_check_meters_include_exclude(self): self.poll_cfg['sources'][0]['meters'] = ['a', '!b'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_check_meters_wildcard_included(self): self.poll_cfg['sources'][0]['meters'] = ['a', '*'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) ceilometer-10.0.0/ceilometer/tests/unit/test_middleware.py0000666000175100017510000001014013236733243023747 0ustar zuulzuul00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from ceilometer import middleware from ceilometer import service from ceilometer.tests import base HTTP_REQUEST = { u'ctxt': {u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2'}, u'event_type': u'http.request', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'metadata': {u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195'}, } HTTP_RESPONSE = { u'ctxt': {u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', u'is_admin': True, u'project_id': u'7c150a59fe714e6f9263774af9688f0e', u'quota_class': None, u'read_deleted': u'no', u'remote_address': u'10.0.2.15', u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', u'roles': [u'admin'], u'timestamp': u'2012-05-08T20:23:41.425105', u'user_id': u'1e3ce043029547f1a61c1996d1a531a2'}, u'event_type': u'http.response', u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}, u'response': {'status': '200 OK'}}, u'priority': u'INFO', u'publisher_id': u'compute.vagrant-precise', u'metadata': {u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', u'timestamp': u'2012-05-08 20:23:48.028195'}, } class TestNotifications(base.BaseTestCase): def setUp(self): super(TestNotifications, self).setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) def test_process_request_notification(self): sample = list(middleware.HTTPRequest( mock.Mock(), mock.Mock()).build_sample(HTTP_REQUEST))[0] self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_process_response_notification(self): sample = list(middleware.HTTPResponse( mock.Mock(), mock.Mock()).build_sample(HTTP_RESPONSE))[0] self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) ceilometer-10.0.0/ceilometer/tests/unit/image/0000775000175100017510000000000013236733440021304 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/image/test_glance.py0000666000175100017510000000777613236733243024172 0ustar zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.image import glance from ceilometer.polling import manager from ceilometer import service import ceilometer.tests.base as base IMAGE_LIST = [ type('Image', (object,), {u'status': u'active', u'tags': [], u'kernel_id': u'fd24d91a-dfd5-4a3c-b990-d4563eb27396', u'container_format': u'ami', u'min_ram': 0, u'ramdisk_id': u'd629522b-ebaa-4c92-9514-9e31fe760d18', u'updated_at': u'2016-06-20T13: 34: 41Z', u'visibility': u'public', u'owner': u'6824974c08974d4db864bbaa6bc08303', u'file': u'/v2/images/fda54a44-3f96-40bf-ab07-0a4ce9e1761d/file', u'min_disk': 0, u'virtual_size': None, u'id': u'fda54a44-3f96-40bf-ab07-0a4ce9e1761d', u'size': 25165824, u'name': u'cirros-0.3.4-x86_64-uec', u'checksum': u'eb9139e4942121f22bbc2afc0400b2a4', u'created_at': u'2016-06-20T13: 34: 40Z', u'disk_format': u'ami', u'protected': False, u'schema': u'/v2/schemas/image'}), type('Image', (object,), {u'status': u'active', u'tags': [], u'container_format': u'ari', u'min_ram': 0, u'updated_at': u'2016-06-20T13: 34: 38Z', u'visibility': u'public', u'owner': u'6824974c08974d4db864bbaa6bc08303', u'file': u'/v2/images/d629522b-ebaa-4c92-9514-9e31fe760d18/file', u'min_disk': 0, u'virtual_size': None, u'id': u'd629522b-ebaa-4c92-9514-9e31fe760d18', u'size': 3740163, u'name': u'cirros-0.3.4-x86_64-uec-ramdisk', u'checksum': u'be575a2b939972276ef675752936977f', u'created_at': u'2016-06-20T13: 34: 37Z', u'disk_format': u'ari', u'protected': False, u'schema': u'/v2/schemas/image'}), type('Image', (object,), {u'status': u'active', u'tags': [], u'container_format': u'aki', u'min_ram': 0, u'updated_at': u'2016-06-20T13: 34: 35Z', u'visibility': u'public', u'owner': u'6824974c08974d4db864bbaa6bc08303', u'file': u'/v2/images/fd24d91a-dfd5-4a3c-b990-d4563eb27396/file', u'min_disk': 0, u'virtual_size': None, u'id': u'fd24d91a-dfd5-4a3c-b990-d4563eb27396', u'size': 4979632, u'name': u'cirros-0.3.4-x86_64-uec-kernel', u'checksum': u'8a40c862b5735975d82605c1dd395796', u'created_at': u'2016-06-20T13: 34: 35Z', u'disk_format': u'aki', u'protected': False, u'schema': u'/v2/schemas/image'}), ] class TestImagePollsterPageSize(base.BaseTestCase): def setUp(self): super(TestImagePollsterPageSize, self).setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = glance.ImageSizePollster(conf) def test_image_pollster(self): image_samples = list( self.pollster.get_samples(self.manager, {}, resources=IMAGE_LIST)) self.assertEqual(3, len(image_samples)) self.assertEqual('image.size', image_samples[0].name) self.assertEqual(25165824, image_samples[0].volume) self.assertEqual('6824974c08974d4db864bbaa6bc08303', image_samples[0].project_id) self.assertEqual('fda54a44-3f96-40bf-ab07-0a4ce9e1761d', image_samples[0].resource_id) ceilometer-10.0.0/ceilometer/tests/unit/image/__init__.py0000666000175100017510000000000013236733243023406 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/objectstore/0000775000175100017510000000000013236733440022545 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/objectstore/test_rgw.py0000666000175100017510000001703513236733243024766 0ustar zuulzuul00000000000000# Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import fixtures from keystoneauth1 import exceptions import mock from oslotest import base import testscenarios.testcase from ceilometer.objectstore import rgw from ceilometer.objectstore import rgw_client from ceilometer.polling import manager from ceilometer import service bucket_list1 = [rgw_client.RGWAdminClient.Bucket('somefoo1', 10, 7)] bucket_list2 = [rgw_client.RGWAdminClient.Bucket('somefoo2', 2, 9)] bucket_list3 = [rgw_client.RGWAdminClient.Bucket('unlisted', 100, 100)] GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list1}), ('tenant-001', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list2}), ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list3})] GET_USAGE = [('tenant-000', 10), ('tenant-001', 11), ('tenant-002-ignored', 12)] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self, worker_id, conf): super(TestManager, self).__init__(worker_id, conf) self._keystone = mock.Mock() self._catalog = (self._keystone.session.auth.get_access. return_value.service_catalog) self._catalog.url_for.return_value = 'http://foobar/endpoint' class TestRgwPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. 
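    # (testscenarios runs every test method once per entry below, copying the
    # scenario dict onto the instance, so self.factory is the pollster class
    # under test for the active scenario.)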
scenarios = [ ('radosgw.objects', {'factory': rgw.ObjectsPollster}), ('radosgw.objects.size', {'factory': rgw.ObjectsSizePollster}), ('radosgw.objects.containers', {'factory': rgw.ObjectsContainersPollster}), ('radosgw.containers.objects', {'factory': rgw.ContainersObjectsPollster}), ('radosgw.containers.objects.size', {'factory': rgw.ContainersSizePollster}), ('radosgw.api.request', {'factory': rgw.UsagePollster}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i def setUp(self): super(TestRgwPollster, self).setUp() conf = service.prepare_service([], []) conf.set_override('radosgw', 'object-store', group='service_types') self.pollster = self.factory(conf) self.manager = TestManager(0, conf) if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket': self.ACCOUNTS = GET_BUCKETS else: self.ACCOUNTS = GET_USAGE def tearDown(self): super(TestRgwPollster, self).tearDown() rgw._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with fixtures.MockPatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call rgw_clinet. mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock_method): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_metering(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) def test_get_meter_names(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'http://127.0.0.1:8000/admin' api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock_method): with fixtures.MockPatchObject( self.manager._catalog, 'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(t.id) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): mock_url_for = mock.MagicMock() mock_url_for.return_value = '/endpoint' api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock.MagicMock()): with fixtures.MockPatchObject( self.manager._catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, 
mock_url_for.call_count) def test_endpoint_notfound(self): with fixtures.MockPatchObject( self.manager._catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ceilometer-10.0.0/ceilometer/tests/unit/objectstore/test_swift.py0000666000175100017510000002357513236733243025331 0ustar zuulzuul00000000000000# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import fixtures from keystoneauth1 import exceptions import mock from oslotest import base from swiftclient import client as swift_client import testscenarios.testcase from ceilometer.objectstore import swift from ceilometer.polling import manager from ceilometer import service HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, 'x-account-bytes-used': 321321321, 'x-account-container-count': 7, }), ('tenant-001', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, }), ('tenant-002-ignored', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, })] GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, 'x-account-bytes-used': 123123, 'x-account-container-count': 2, }, [{'count': 10, 'bytes': 123123, 'name': 'my_container'}, {'count': 0, 'bytes': 0, 'name': 'new_container' }])), ('tenant-001', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, [])), ('tenant-002-ignored', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, []))] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self, worker_id, conf): super(TestManager, self).__init__(worker_id, conf) self._keystone = mock.MagicMock() self._keystone_last_exception = None self._service_catalog = (self._keystone.session.auth. get_access.return_value.service_catalog) self._auth_token = (self._keystone.session.auth. get_access.return_value.auth_token) class TestSwiftPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. 
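    # (Same scenario mechanism as in TestRgwPollster: each meter name below
    # is paired with the pollster class that self.factory resolves to.)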
scenarios = [ ('storage.objects', {'factory': swift.ObjectsPollster}), ('storage.objects.size', {'factory': swift.ObjectsSizePollster}), ('storage.objects.containers', {'factory': swift.ObjectsContainersPollster}), ('storage.containers.objects', {'factory': swift.ContainersObjectsPollster}), ('storage.containers.objects.size', {'factory': swift.ContainersSizePollster}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i def setUp(self): super(TestSwiftPollster, self).setUp() self.CONF = service.prepare_service([], []) self.pollster = self.factory(self.CONF) self.manager = TestManager(0, self.CONF) if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': self.ACCOUNTS = HEAD_ACCOUNTS else: self.ACCOUNTS = GET_ACCOUNTS def tearDown(self): super(TestSwiftPollster, self).tearDown() swift._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with fixtures.MockPatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call swiftclient. mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = '%s_account' % self.pollster.METHOD with fixtures.MockPatchObject(swift_client, api_method, new=mock_method): with fixtures.MockPatchObject(self.factory, '_neaten_url'): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_neaten_url(self): reseller_prefix = self.CONF.reseller_prefix test_endpoints = ['http://127.0.0.1:8080', 'http://127.0.0.1:8080/swift'] test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' for test_endpoint in test_endpoints: standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id url = swift._Base._neaten_url(test_endpoint, test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/v1', test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(standard_url, test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) def test_metering(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) def test_get_meter_names(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'end://point/' api_method = '%s_account' % self.pollster.METHOD with fixtures.MockPatchObject(swift_client, api_method, new=mock_method): with fixtures.MockPatchObject( self.manager._service_catalog, 
'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(self.pollster._neaten_url( endpoint, t.id, self.CONF.reseller_prefix), self.manager._auth_token) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): endpoint = 'end://point/' mock_url_for = mock.MagicMock(return_value=endpoint) api_method = '%s_account' % self.pollster.METHOD with fixtures.MockPatchObject(swift_client, api_method, new=mock.MagicMock()): with fixtures.MockPatchObject( self.manager._service_catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, mock_url_for.call_count) def test_endpoint_notfound(self): with fixtures.MockPatchObject( self.manager._service_catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ceilometer-10.0.0/ceilometer/tests/unit/objectstore/__init__.py0000666000175100017510000000000013236733243024647 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/objectstore/test_rgw_client.py0000666000175100017510000001407513236733243026325 0ustar zuulzuul00000000000000# Copyright (C) 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
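# A standalone sketch of the HTTP-mocking pattern exercised by the tests
# below: requests.get is patched so the code under test sees a canned status
# code and JSON body without touching the network. The fetch_summary helper
# and its URL are hypothetical, used only to illustrate the pattern.
import json

import mock
import requests


def fetch_summary(url):
    resp = requests.get(url)
    if resp.status_code != 200:
        raise RuntimeError("unexpected status %s" % resp.status_code)
    return resp.json()


def fetch_summary_demo():
    canned = mock.MagicMock(status_code=200)
    canned.json.return_value = json.loads('{"ops": 7}')
    with mock.patch('requests.get', return_value=canned):
        assert fetch_summary('http://rgw.example.test/admin')['ops'] == 7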
import json import mock from oslotest import base from ceilometer.objectstore import rgw_client RGW_ADMIN_BUCKETS = ''' [ { "max_marker": "", "ver": 2001, "usage": { "rgw.main": { "size_kb_actual": 16000, "num_objects": 1000, "size_kb": 1000 } }, "bucket": "somefoo", "owner": "admin", "master_ver": 0, "mtime": 1420176126, "marker": "default.4126.1", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.1", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" }, { "max_marker": "", "ver": 3, "usage": { "rgw.main": { "size_kb_actual": 43, "num_objects": 1, "size_kb": 42 } }, "bucket": "somefoo31", "owner": "admin", "master_ver": 0, "mtime": 1420176134, "marker": "default.4126.5", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.5", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" } ]''' RGW_ADMIN_USAGE = ''' { "entries": [ { "owner": "5f7fe2d5352e466f948f49341e33d107", "buckets": [ { "bucket": "", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}]}, { "bucket": "foodsgh", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}]}]}], "summary": [ { "user": "5f7fe2d5352e466f948f49341e33d107", "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}], "total": { "bytes_sent": 46, "bytes_received": 238, "ops": 7, "successful_ops": 6}}]} ''' buckets_json = json.loads(RGW_ADMIN_BUCKETS) usage_json = json.loads(RGW_ADMIN_USAGE) class TestRGWAdminClient(base.BaseTestCase): def setUp(self): super(TestRGWAdminClient, self).setUp() self.client = rgw_client.RGWAdminClient('http://127.0.0.1:8080/admin', 'abcde', 'secret') self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() def test_make_request_exception(self): self.get_resp.status_code = 403 self.assertRaises(rgw_client.RGWAdminAPIFailed, self.client._make_request, *('foo', {})) def test_make_request(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client._make_request('foo', []) self.assertEqual(buckets_json, actual) def test_get_buckets(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client.get_bucket('foo') bucket_list = [rgw_client.RGWAdminClient.Bucket('somefoo', 1000, 1000), rgw_client.RGWAdminClient.Bucket('somefoo31', 1, 42), ] expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) def test_get_usage(self): self.get_resp.status_code = 200 
self.get_resp.json.return_value = usage_json actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) ceilometer-10.0.0/ceilometer/tests/unit/compute/0000775000175100017510000000000013236733440021676 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/test_discovery.py0000666000175100017510000002653313236733243025332 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import fixtures import iso8601 import mock import testtools try: import libvirt except ImportError: libvirt = None from ceilometer.compute import discovery from ceilometer.compute.pollsters import util from ceilometer.compute.virt.libvirt import utils from ceilometer import service import ceilometer.tests.base as base LIBVIRT_METADATA_XML = """ test.dom.com 2016-11-16 07:35:06 512 1 0 0 1 admin admin """ LIBVIRT_DESC_XML = """ instance-00000001 a75c2fa5-6c03-45a8-bbf7-b993cfcdec27 hvm /opt/stack/data/nova/instances/a75c2fa5-6c03-45a8-bbf7-b993cfcdec27/kernel /opt/stack/data/nova/instances/a75c2fa5-6c03-45a8-bbf7-b993cfcdec27/ramdisk root=/dev/vda console=tty0 console=ttyS0 """ LIBVIRT_MANUAL_INSTANCE_DESC_XML = """ Manual-instance-00000001 5e637d0d-8c0e-441a-a11a-a9dc95aed84e hvm /opt/instances/5e637d0d-8c0e-441a-a11a-a9dc95aed84e/kernel /opt/instances/5e637d0d-8c0e-441a-a11a-a9dc95aed84e/ramdisk root=/dev/vda console=tty0 console=ttyS0 """ class FakeDomain(object): def state(self): return [1, 2] def name(self): return "instance-00000001" def UUIDString(self): return "a75c2fa5-6c03-45a8-bbf7-b993cfcdec27" def XMLDesc(self): return LIBVIRT_DESC_XML def metadata(self, flags, url): return LIBVIRT_METADATA_XML class FakeConn(object): def listAllDomains(self): return [FakeDomain()] class FakeManualInstanceDomain(object): def state(self): return [1, 2] def name(self): return "Manual-instance-00000001" def UUIDString(self): return "5e637d0d-8c0e-441a-a11a-a9dc95aed84e" def XMLDesc(self): return LIBVIRT_MANUAL_INSTANCE_DESC_XML def metadata(self, flags, url): # Note(xiexianbin): vm not create by nova-compute don't have metadata # elements like: '' # When invoke get metadata method, raise libvirtError. 
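        # A standalone sketch of how a discovery loop copes with the
        # libvirtError described above: domains without the nova metadata
        # element are simply skipped. The namespace URI and helper name are
        # assumptions for illustration, not taken from this module.
        #
        #     def list_nova_domains(conn, libvirt_mod):
        #         nova_ns = 'http://openstack.org/xmlns/libvirt/nova/1.0'
        #         nova_domains = []
        #         for dom in conn.listAllDomains():
        #             try:
        #                 dom.metadata(
        #                     libvirt_mod.VIR_DOMAIN_METADATA_ELEMENT,
        #                     nova_ns)
        #             except libvirt_mod.libvirtError:
        #                 continue  # not created by nova-compute, skip it
        #             nova_domains.append(dom)
        #         return nova_domains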
raise libvirt.libvirtError( "metadata not found: Requested metadata element is not present") class FakeManualInstanceConn(object): def listAllDomains(self): return [FakeManualInstanceDomain()] class TestDiscovery(base.BaseTestCase): def setUp(self): super(TestDiscovery, self).setUp() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') # FIXME(sileht): This is wrong, this should be a uuid # The internal id of nova can't be retrieved via API or notification self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.client = mock.MagicMock() self.client.instance_get_all_by_host.return_value = [self.instance] patch_client = fixtures.MockPatch('ceilometer.nova_client.Client', return_value=self.client) self.useFixture(patch_client) self.utc_now = mock.MagicMock( return_value=datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC)) patch_timeutils = fixtures.MockPatch('oslo_utils.timeutils.utcnow', self.utc_now) self.useFixture(patch_timeutils) self.CONF = service.prepare_service([], []) self.CONF.set_override('host', 'test') def test_normal_discovery(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( 'test', None) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_with( self.CONF.host, "2016-01-01T00:00:00+00:00") def test_discovery_with_resource_update_interval(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") self.CONF.set_override("resource_update_interval", 600, group="compute") dsc = discovery.InstanceDiscovery(self.CONF) dsc.last_run = datetime.datetime(2016, 1, 1, tzinfo=iso8601.iso8601.UTC) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=5, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) self.client.instance_get_all_by_host.assert_not_called() self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( self.CONF.host, "2016-01-01T00:00:00+00:00") @mock.patch.object(utils, "libvirt") @mock.patch.object(discovery, "libvirt") def test_discovery_with_libvirt(self, libvirt, libvirt2): self.CONF.set_override("instance_discovery_method", "libvirt_metadata", group="compute") libvirt.VIR_DOMAIN_METADATA_ELEMENT = 2 libvirt2.openReadOnly.return_value = FakeConn() dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) r = 
list(resources)[0] s = util.make_sample_from_instance(self.CONF, r, "metric", "delta", "carrot", 1) self.assertEqual("a75c2fa5-6c03-45a8-bbf7-b993cfcdec27", s.resource_id) self.assertEqual("d99c829753f64057bc0f2030da309943", s.project_id) self.assertEqual("a1f4684e58bd4c88aefd2ecb0783b497", s.user_id) metadata = s.resource_metadata self.assertEqual(1, metadata["vcpus"]) self.assertEqual(512, metadata["memory_mb"]) self.assertEqual(1, metadata["disk_gb"]) self.assertEqual(0, metadata["ephemeral_gb"]) self.assertEqual(1, metadata["root_gb"]) self.assertEqual("bdaf114a-35e9-4163-accd-226d5944bf11", metadata["image_ref"]) self.assertEqual("test.dom.com", metadata["display_name"]) self.assertEqual("instance-00000001", metadata["name"]) self.assertEqual("a75c2fa5-6c03-45a8-bbf7-b993cfcdec27", metadata["instance_id"]) self.assertEqual("m1.tiny", metadata["instance_type"]) self.assertEqual( "4d0bc931ea7f0513da2efd9acb4cf3a273c64b7bcc544e15c070e662", metadata["host"]) self.assertEqual(self.CONF.host, metadata["instance_host"]) self.assertEqual("active", metadata["status"]) self.assertEqual("running", metadata["state"]) self.assertEqual("hvm", metadata["os_type"]) self.assertEqual("x86_64", metadata["architecture"]) def test_discovery_with_legacy_resource_cache_cleanup(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") self.CONF.set_override("resource_update_interval", 600, group="compute") self.CONF.set_override("resource_cache_expiry", 1800, group="compute") dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=31, tzinfo=iso8601.iso8601.UTC) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) expected_calls = [mock.call('test', None), mock.call('test', '2016-01-01T00:00:00+00:00'), mock.call('test', None)] self.assertEqual(expected_calls, self.client.instance_get_all_by_host.call_args_list) @testtools.skipUnless(libvirt, "libvirt not available") @mock.patch.object(utils, "libvirt") @mock.patch.object(discovery, "libvirt") def test_discovery_with_libvirt_error(self, libvirt, libvirt2): self.CONF.set_override("instance_discovery_method", "libvirt_metadata", group="compute") libvirt.VIR_DOMAIN_METADATA_ELEMENT = 2 libvirt2.openReadOnly.return_value = FakeManualInstanceConn() dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/0000775000175100017510000000000013236733440023725 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_memory.py0000666000175100017510000001726613236733243026665 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer.tests.unit.compute.pollsters import base class TestMemoryPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_usage=1.0), virt_inspector.InstanceStats(memory_usage=2.0), virt_inspector.InstanceStats(), virt_inspector.InstanceShutOffException(), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryUsagePollster(self.CONF) @mock.patch('ceilometer.compute.pollsters.LOG') def _verify_memory_metering(expected_count, expected_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.usage']), set([s.name for s in samples])) self.assertEqual(expected_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_memory_metering(1, 1.0, 0) _verify_memory_metering(1, 2.0, 0) _verify_memory_metering(0, 0, 1) _verify_memory_metering(0, 0, 0) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryUsagePollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) self.assertRaises(plugin_base.PollsterPermanentError, all_samples) class TestResidentMemoryPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_resident=1.0), virt_inspector.InstanceStats(memory_resident=2.0), virt_inspector.InstanceStats(), virt_inspector.InstanceShutOffException(), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryResidentPollster(self.CONF) @mock.patch('ceilometer.compute.pollsters.LOG') def _verify_resident_memory_metering(expected_count, expected_resident_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual(set(['memory.resident']), set([s.name for s in samples])) self.assertEqual(expected_resident_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_resident_memory_metering(1, 1.0, 0) _verify_resident_memory_metering(1, 2.0, 0) _verify_resident_memory_metering(0, 0, 1) _verify_resident_memory_metering(0, 0, 0) class TestMemorySwapPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_swap_in=1.0, memory_swap_out=2.0), virt_inspector.InstanceStats(memory_swap_in=3.0, memory_swap_out=4.0), ) mgr = manager.AgentManager(0, self.CONF) def _check_memory_swap_in(expected_swap_in): pollster = instance_stats.MemorySwapInPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['memory.swap.in']), set([s.name for s in samples])) self.assertEqual(expected_swap_in, samples[0].volume) def _check_memory_swap_out(expected_swap_out): pollster = 
instance_stats.MemorySwapOutPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['memory.swap.out']), set([s.name for s in samples])) self.assertEqual(expected_swap_out, samples[0].volume) _check_memory_swap_in(1.0) _check_memory_swap_out(4.0) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemorySwapInPollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) self.assertRaises(plugin_base.PollsterPermanentError, all_samples) class TestMemoryBandwidthPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_bandwidth_total=1892352, memory_bandwidth_local=1802240), virt_inspector.InstanceStats(memory_bandwidth_total=1081344, memory_bandwidth_local=90112), ) mgr = manager.AgentManager(0, self.CONF) def _check_memory_bandwidth_total(expected_usage): pollster = instance_stats.MemoryBandwidthTotalPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['memory.bandwidth.total']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) def _check_memory_bandwidth_local(expected_usage): pollster = instance_stats.MemoryBandwidthLocalPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['memory.bandwidth.local']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) _check_memory_bandwidth_total(1892352) _check_memory_bandwidth_local(90112) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryBandwidthTotalPollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) self.assertRaises(plugin_base.PollsterPermanentError, all_samples) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_diskio.py0000666000175100017510000003534613236733243026636 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
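# A standalone sketch of the aggregation the disk pollster tests below
# assert: per-device readings are summed for the instance-level meters, while
# per-device meters key each sample by "<instance-id>-<device>". The
# namedtuple and helper here are illustrative, not this module's API.
import collections

DiskReading = collections.namedtuple('DiskReading', ['device', 'read_bytes'])


def aggregate_read_bytes(instance_id, readings):
    total = sum(r.read_bytes for r in readings)
    per_device = dict(('%s-%s' % (instance_id, r.device), r.read_bytes)
                      for r in readings)
    return total, per_device


# aggregate_read_bytes(1, [DiskReading('vda1', 1), DiskReading('vda2', 2)])
# returns (3, {'1-vda1': 1, '1-vda2': 2}), mirroring the expectations in
# TestDiskPollsters below.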
import mock from ceilometer.compute.pollsters import disk from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestBaseDiskIO(base.TestPollsterBase): TYPE = 'cumulative' def setUp(self): super(TestBaseDiskIO, self).setUp() self.instance = self._get_fake_instances() @staticmethod def _get_fake_instances(): instances = [] for i in [1, 2]: instance = mock.MagicMock() instance.name = 'instance-%s' % i setattr(instance, 'OS-EXT-SRV-ATTR:instance_name', instance.name) instance.id = i instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} instance.status = 'active' instances.append(instance) return instances def _check_get_samples(self, factory, name, expected_count=2): pollster = factory(self.CONF) mgr = manager.AgentManager(0, self.CONF) cache = {} samples = list(pollster.get_samples(mgr, cache, self.instance)) self.assertIsNotEmpty(samples) cache_key = pollster.inspector_method self.assertIn(cache_key, cache) for instance in self.instance: self.assertIn(instance.id, cache[cache_key]) self.assertEqual(set([name]), set([s.name for s in samples])) match = [s for s in samples if s.name == name] self.assertEqual(len(match), expected_count, 'missing counter %s' % name) return match def _check_aggregate_samples(self, factory, name, expected_volume, expected_device=None): match = self._check_get_samples(factory, name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(self.TYPE, match[0].type) if expected_device is not None: self.assertEqual(set(expected_device), set(match[0].resource_metadata.get('device'))) instances = [i.id for i in self.instance] for m in match: self.assertIn(m.resource_id, instances) def _check_per_device_samples(self, factory, name, expected_volume, expected_device=None): match = self._check_get_samples(factory, name, expected_count=4) match_dict = {} for m in match: match_dict[m.resource_id] = m for instance in self.instance: key = "%s-%s" % (instance.id, expected_device) self.assertEqual(expected_volume, match_dict[key].volume) self.assertEqual(self.TYPE, match_dict[key].type) self.assertEqual(key, match_dict[key].resource_id) class TestDiskPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskStats(device='vda1', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors=-1, rd_total_times=100, wr_total_times=200,), virt_inspector.DiskStats(device='vda2', read_bytes=2, read_requests=3, write_bytes=5, write_requests=7, errors=-1, rd_total_times=300, wr_total_times=400,), ] def setUp(self): super(TestDiskPollsters, self).setUp() self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS) def test_disk_read_requests(self): self._check_aggregate_samples(disk.ReadRequestsPollster, 'disk.read.requests', 5, expected_device=['vda1', 'vda2']) def test_disk_read_bytes(self): self._check_aggregate_samples(disk.ReadBytesPollster, 'disk.read.bytes', 3, expected_device=['vda1', 'vda2']) def test_disk_write_requests(self): self._check_aggregate_samples(disk.WriteRequestsPollster, 'disk.write.requests', 11, expected_device=['vda1', 'vda2']) def test_disk_write_bytes(self): self._check_aggregate_samples(disk.WriteBytesPollster, 'disk.write.bytes', 8, expected_device=['vda1', 'vda2']) def test_per_disk_read_requests(self): self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 
'disk.device.read.requests', 3, 'vda2') def test_per_disk_write_requests(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 4, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 7, 'vda2') def test_per_disk_read_bytes(self): self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 1, 'vda1') self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 2, 'vda2') def test_per_disk_write_bytes(self): self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 5, 'vda2') def test_per_device_read_latency(self): self._check_per_device_samples( disk.PerDeviceDiskReadLatencyPollster, 'disk.device.read.latency', 100, 'vda1') self._check_per_device_samples( disk.PerDeviceDiskReadLatencyPollster, 'disk.device.read.latency', 300, 'vda2') def test_per_device_write_latency(self): self._check_per_device_samples( disk.PerDeviceDiskWriteLatencyPollster, 'disk.device.write.latency', 200, 'vda1') self._check_per_device_samples( disk.PerDeviceDiskWriteLatencyPollster, 'disk.device.write.latency', 400, 'vda2') class TestDiskRatePollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskRateStats("disk1", 1024, 300, 5120, 700), virt_inspector.DiskRateStats("disk2", 2048, 400, 6144, 800) ] TYPE = 'gauge' def setUp(self): super(TestDiskRatePollsters, self).setUp() self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) def test_disk_read_bytes_rate(self): self._check_aggregate_samples(disk.ReadBytesRatePollster, 'disk.read.bytes.rate', 3072, expected_device=['disk1', 'disk2']) def test_disk_read_requests_rate(self): self._check_aggregate_samples(disk.ReadRequestsRatePollster, 'disk.read.requests.rate', 700, expected_device=['disk1', 'disk2']) def test_disk_write_bytes_rate(self): self._check_aggregate_samples(disk.WriteBytesRatePollster, 'disk.write.bytes.rate', 11264, expected_device=['disk1', 'disk2']) def test_disk_write_requests_rate(self): self._check_aggregate_samples(disk.WriteRequestsRatePollster, 'disk.write.requests.rate', 1500, expected_device=['disk1', 'disk2']) def test_per_disk_read_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 1024, 'disk1') self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, 'disk.device.read.bytes.rate', 2048, 'disk2') def test_per_disk_read_requests_rate(self): self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 300, 'disk1') self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, 'disk.device.read.requests.rate', 400, 'disk2') def test_per_disk_write_bytes_rate(self): self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 5120, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, 'disk.device.write.bytes.rate', 6144, 'disk2') def test_per_disk_write_requests_rate(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 'disk.device.write.requests.rate', 700, 'disk1') self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, 'disk.device.write.requests.rate', 800, 'disk2') class TestDiskLatencyPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskLatencyStats("disk1", 1), 
virt_inspector.DiskLatencyStats("disk2", 2) ] TYPE = 'gauge' def setUp(self): super(TestDiskLatencyPollsters, self).setUp() self.inspector.inspect_disk_latency = mock.Mock( return_value=self.DISKS) def test_disk_latency(self): self._check_aggregate_samples(disk.DiskLatencyPollster, 'disk.latency', 3) def test_per_device_latency(self): self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 1, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, 'disk.device.latency', 2, 'disk2') class TestDiskIOPSPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskIOPSStats("disk1", 10), virt_inspector.DiskIOPSStats("disk2", 20), ] TYPE = 'gauge' def setUp(self): super(TestDiskIOPSPollsters, self).setUp() self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) def test_disk_iops(self): self._check_aggregate_samples(disk.DiskIOPSPollster, 'disk.iops', 30) def test_per_device_iops(self): self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 10, 'disk1') self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, 'disk.device.iops', 20, 'disk2') class TestDiskInfoPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskInfo(device="vda1", capacity=3, allocation=2, physical=1), virt_inspector.DiskInfo(device="vda2", capacity=4, allocation=3, physical=2), ] TYPE = 'gauge' def setUp(self): super(TestDiskInfoPollsters, self).setUp() self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) def test_disk_capacity(self): self._check_aggregate_samples(disk.CapacityPollster, 'disk.capacity', 7, expected_device=['vda1', 'vda2']) def test_disk_allocation(self): self._check_aggregate_samples(disk.AllocationPollster, 'disk.allocation', 5, expected_device=['vda1', 'vda2']) def test_disk_physical(self): self._check_aggregate_samples(disk.PhysicalPollster, 'disk.usage', 3, expected_device=['vda1', 'vda2']) def test_per_disk_capacity(self): self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 4, 'vda2') def test_per_disk_allocation(self): self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 3, 'vda2') def test_per_disk_physical(self): self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 1, 'vda1') self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 2, 'vda2') ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_perf.py0000666000175100017510000000716713236733243026310 0ustar zuulzuul00000000000000# Copyright 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer.tests.unit.compute.pollsters import base class TestPerfPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_cycles=7259361, instructions=8815623, cache_references=74184, cache_misses=16737) ) mgr = manager.AgentManager(0, self.CONF) cache = {} def _check_perf_events_cpu_cycles(expected_usage): pollster = instance_stats.PerfCPUCyclesPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['perf.cpu.cycles']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_instructions(expected_usage): pollster = instance_stats.PerfInstructionsPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['perf.instructions']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_cache_references(expected_usage): pollster = instance_stats.PerfCacheReferencesPollster( self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['perf.cache.references']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_cache_misses(expected_usage): pollster = instance_stats.PerfCacheMissesPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['perf.cache.misses']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) _check_perf_events_cpu_cycles(7259361) _check_perf_events_instructions(8815623) _check_perf_events_cache_references(74184) _check_perf_events_cache_misses(16737) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.PerfCPUCyclesPollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) self.assertRaises(plugin_base.PollsterPermanentError, all_samples) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/__init__.py0000666000175100017510000000000013236733243026027 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/base.py0000666000175100017510000000517713236733243025226 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
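# Background for the _mock_inspect_instance helper defined below: a plain
# mock with an iterable side_effect already returns successive values and
# raises any exception instances it encounters; the helper reimplements that
# walk explicitly so only InstanceStats objects are treated as return values.
# Minimal demonstration with throwaway values:
import mock


def side_effect_sequence_demo():
    probe = mock.Mock(side_effect=[1, ValueError('boom'), 3])
    assert probe() == 1
    try:
        probe()
    except ValueError:
        pass
    assert probe() == 3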
import fixtures import mock from ceilometer.compute.virt import inspector as virt_inspector from ceilometer import service import ceilometer.tests.base as base class TestPollsterBase(base.BaseTestCase): def setUp(self): super(TestPollsterBase, self).setUp() self.CONF = service.prepare_service([], []) self.inspector = mock.Mock() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') setattr(self.instance, 'OS-EXT-STS:task_state', None) self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} self.useFixture(fixtures.MockPatch( 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', new=mock.Mock(return_value=self.inspector))) # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.useFixture(fixtures.MockPatch( 'ceilometer.compute.pollsters.' 'GenericComputePollster._get_inspector', return_value=self.inspector)) def _mock_inspect_instance(self, *data): next_value = iter(data) def inspect(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.InstanceStats): return value else: raise value self.inspector.inspect_instance = mock.Mock(side_effect=inspect) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_cpu.py0000666000175100017510000001407013236733243026132 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
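# The CPU tests below feed cumulative cpu_time readings (in nanoseconds) to
# the pollster and note that the counter resets when an instance restarts.
# For context, one common way such cumulative readings are turned into a
# utilisation percentage is sketched here; the formula and helper are
# illustrative, not taken from this module.
def cpu_util_percent(prev_ns, curr_ns, elapsed_s, vcpus):
    if elapsed_s <= 0 or curr_ns < prev_ns:
        # No elapsed time, or the counter reset (e.g. instance restart).
        return None
    used_s = (curr_ns - prev_ns) / 1e9
    return 100.0 * used_s / (elapsed_s * vcpus)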
import time from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestCPUPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_time=1 * (10 ** 6), cpu_number=2), virt_inspector.InstanceStats(cpu_time=3 * (10 ** 6), cpu_number=2), # cpu_time resets on instance restart virt_inspector.InstanceStats(cpu_time=2 * (10 ** 6), cpu_number=2), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) def _verify_cpu_metering(expected_time): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu']), set([s.name for s in samples])) self.assertEqual(expected_time, samples[0].volume) self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) # ensure elapsed time between polling cycles is non-zero time.sleep(0.001) _verify_cpu_metering(1 * (10 ** 6)) _verify_cpu_metering(3 * (10 ** 6)) _verify_cpu_metering(2 * (10 ** 6)) # the following apply to all instance resource pollsters but are tested # here alone. def test_get_metadata(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(1, samples[0].resource_metadata['vcpus']) self.assertEqual(512, samples[0].resource_metadata['memory_mb']) self.assertEqual(20, samples[0].resource_metadata['disk_gb']) self.assertEqual(20, samples[0].resource_metadata['root_gb']) self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) self.assertEqual('active', samples[0].resource_metadata['status']) self.assertEqual('active', samples[0].resource_metadata['state']) self.assertIsNone(samples[0].resource_metadata['task_state']) def test_get_reserved_metadata_with_keys(self): self.CONF.set_override('reserved_metadata_keys', ['fqdn']) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'fqdn': 'vm_fqdn', 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) def test_get_reserved_metadata_with_namespace(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) self.CONF.set_override('reserved_metadata_namespace', []) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertNotIn('user_metadata', samples[0].resource_metadata) def test_get_flavor_name_as_metadata_instance_type(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('m1.small', samples[0].resource_metadata['instance_type']) class TestCPUUtilPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_util=40), virt_inspector.InstanceStats(cpu_util=60), ) mgr = manager.AgentManager(0, self.CONF) pollster = 
instance_stats.CPUUtilPollster(self.CONF) def _verify_cpu_util_metering(expected_util): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu_util']), set([s.name for s in samples])) self.assertEqual(expected_util, samples[0].volume) _verify_cpu_util_metering(40) _verify_cpu_util_metering(60) class TestCPUL3CachePollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_l3_cache_usage=90112), virt_inspector.InstanceStats(cpu_l3_cache_usage=180224), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUL3CachePollster(self.CONF) def _verify_cpu_l3_cache_metering(expected_usage): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(set(['cpu_l3_cache']), set([s.name for s in samples])) self.assertEqual(expected_usage, samples[0].volume) _verify_cpu_l3_cache_metering(90112) _verify_cpu_l3_cache_metering(180224) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_net.py0000666000175100017510000003074413236733243026137 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
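# The vnic tests below expect a stable per-interface resource id: the
# interface's filter ref (fref) when one exists, otherwise
# "<instance-name>-<instance-id>-<vnic-name>". A compact sketch of that rule
# (the helper name is illustrative):
def vnic_resource_id(instance_name, instance_id, vnic_name, fref=None):
    if fref:
        return fref
    return '%s-%s-%s' % (instance_name, instance_id, vnic_name)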
import mock from ceilometer.compute.pollsters import net from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class FauxInstance(object): def __init__(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): return getattr(self, key, default) class TestNetPollster(base.TestPollsterBase): def setUp(self): super(TestNetPollster, self).setUp() self.vnic0 = virt_inspector.InterfaceStats( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1'), rx_bytes=1, rx_packets=2, rx_drop=20, rx_errors=21, tx_bytes=3, tx_packets=4, tx_drop=22, tx_errors=23) self.vnic1 = virt_inspector.InterfaceStats( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2'), rx_bytes=5, rx_packets=6, rx_drop=24, rx_errors=25, tx_bytes=7, tx_packets=8, tx_drop=26, tx_errors=27) self.vnic2 = virt_inspector.InterfaceStats( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3'), rx_bytes=9, rx_packets=10, rx_drop=28, rx_errors=29, tx_bytes=11, tx_packets=12, tx_drop=30, tx_errors=31) vnics = [ self.vnic0, self.vnic1, self.vnic2, ] self.inspector.inspect_vnics = mock.Mock(return_value=vnics) self.INSTANCE_PROPERTIES = {'name': 'display name', 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'id': 'instance id', 'user_id': 'user id', 'tenant_id': 'tenant id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'disk': 20, 'ram': 512, 'name': 'tiny', 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.foobar': 42}} self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(len(match), 1, 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('cumulative', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def test_incoming_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesPollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 5, self.vnic1.fref), ('192.168.0.4', 9, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesPollster, [('10.0.0.2', 3, self.vnic0.fref), 
('192.168.0.3', 7, self.vnic1.fref), ('192.168.0.4', 11, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_incoming_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingPacketsPollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 6, self.vnic1.fref), ('192.168.0.4', 10, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_packets(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingPacketsPollster, [('10.0.0.2', 4, self.vnic0.fref), ('192.168.0.3', 8, self.vnic1.fref), ('192.168.0.4', 12, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_incoming_drops(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingDropPollster, [('10.0.0.2', 20, self.vnic0.fref), ('192.168.0.3', 24, self.vnic1.fref), ('192.168.0.4', 28, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_drops(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingDropPollster, [('10.0.0.2', 22, self.vnic0.fref), ('192.168.0.3', 26, self.vnic1.fref), ('192.168.0.4', 30, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_incoming_errors(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingErrorsPollster, [('10.0.0.2', 21, self.vnic0.fref), ('192.168.0.3', 25, self.vnic1.fref), ('192.168.0.4', 29, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_errors(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingErrorsPollster, [('10.0.0.2', 23, self.vnic0.fref), ('192.168.0.3', 27, self.vnic1.fref), ('192.168.0.4', 31, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_metadata(self): factory = net.OutgoingBytesPollster pollster = factory(self.CONF) mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) s = list(pollster.get_samples(mgr, {}, [self.faux_instance]))[0] user_metadata = s.resource_metadata['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, user_metadata['autoscale_group']) self.assertEqual(2, len(user_metadata)) class TestNetRatesPollster(base.TestPollsterBase): def setUp(self): super(TestNetRatesPollster, self).setUp() self.vnic0 = virt_inspector.InterfaceRateStats( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1'), rx_bytes_rate=1, tx_bytes_rate=2) self.vnic1 = virt_inspector.InterfaceRateStats( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2'), rx_bytes_rate=3, tx_bytes_rate=4) self.vnic2 = virt_inspector.InterfaceRateStats( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3'), rx_bytes_rate=5, tx_bytes_rate=6) vnics = [ self.vnic0, self.vnic1, self.vnic2, ] self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) def _check_get_samples(self, factory, expected): mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic 
self.assertEqual(set([samples[0].name]), set([s.name for s in samples])) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(1, len(match), 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('gauge', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def test_incoming_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesRatePollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 3, self.vnic1.fref), ('192.168.0.4', 5, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) def test_outgoing_bytes_rate(self): instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesRatePollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 4, self.vnic1.fref), ('192.168.0.4', 6, "%s-%s" % (instance_name_id, self.vnic2.name)), ], ) ceilometer-10.0.0/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py0000666000175100017510000001201313236733243031006 0ustar zuulzuul00000000000000# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the compute pollsters. 
""" from oslotest import base import six from ceilometer.compute.pollsters import util from ceilometer.polling import manager from ceilometer import service class FauxInstance(object): def __init__(self, **kwds): for name, value in kwds.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): try: return getattr(self, key) except AttributeError: return default class TestLocationMetadata(base.BaseTestCase): def setUp(self): self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) super(TestLocationMetadata, self).setUp() # Mimics an instance returned from nova api call self.INSTANCE_PROPERTIES = {'name': 'display name', 'id': ('234cbe81-4e09-4f64-9b2a-' '714f6b9046e3'), 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'name': 'm1.tiny', 'id': 1, 'disk': 20, 'ram': 512, 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.ephemeral_gb': 42}} self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) def test_metadata(self): md = util._get_metadata_from_object(self.CONF, self.instance) for prop, value in six.iteritems(self.INSTANCE_PROPERTIES): if prop not in ("metadata"): # Special cases if prop == 'name': prop = 'display_name' elif prop == 'hostId': prop = "host" elif prop == 'OS-EXT-SRV-ATTR:host': prop = "instance_host" elif prop == 'OS-EXT-SRV-ATTR:instance_name': prop = 'name' elif prop == "id": prop = "instance_id" self.assertEqual(value, md[prop]) user_metadata = md['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, user_metadata['autoscale_group']) self.assertEqual(1, len(user_metadata)) def test_metadata_empty_image(self): self.INSTANCE_PROPERTIES['image'] = None self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) md = util._get_metadata_from_object(self.CONF, self.instance) self.assertIsNone(md['image']) self.assertIsNone(md['image_ref']) self.assertIsNone(md['image_ref_url']) def test_metadata_image_through_conductor(self): # There should be no links here, should default to None self.INSTANCE_PROPERTIES['image'] = {'id': 1} self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) md = util._get_metadata_from_object(self.CONF, self.instance) self.assertEqual(1, md['image_ref']) self.assertIsNone(md['image_ref_url']) ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/0000775000175100017510000000000013236733440022662 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/xenapi/0000775000175100017510000000000013236733440024146 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/xenapi/__init__.py0000666000175100017510000000000013236733243026250 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py0000666000175100017510000001523113236733243027572 0ustar zuulzuul00000000000000# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for xenapi inspector. """ import mock from oslotest import base from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector from ceilometer import service class TestXenapiInspection(base.BaseTestCase): def setUp(self): super(TestXenapiInspection, self).setUp() conf = service.prepare_service([], []) api_session = mock.Mock() xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) self.inspector = xenapi_inspector.XenapiInspector(conf) def test_inspect_instance(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} fake_total_mem = 134217728.0 fake_free_mem = 65536.0 session = self.inspector.session with mock.patch.object(session.VM, 'get_by_name_label') as mock_name, \ mock.patch.object(session.VM, 'get_VCPUs_max') as mock_vcpu, \ mock.patch.object(session.VM, 'query_data_source') \ as mock_query: mock_name.return_value = ['vm_ref'] mock_vcpu.return_value = '1' mock_query.side_effect = [0.4, fake_total_mem, fake_free_mem] stats = self.inspector.inspect_instance(fake_instance, None) self.assertEqual(40, stats.cpu_util) self.assertEqual(64, stats.memory_usage) def test_inspect_memory_usage_without_freeMem(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} fake_total_mem = 134217728.0 fake_free_mem = 0 session = self.inspector.session with mock.patch.object(session.VM, 'get_by_name_label') as mock_name, \ mock.patch.object(session.VM, 'get_VCPUs_max') as mock_vcpu, \ mock.patch.object(session.VM, 'query_data_source') \ as mock_query: mock_name.return_value = ['vm_ref'] mock_vcpu.return_value = '1' mock_query.side_effect = [0.4, fake_total_mem, fake_free_mem] stats = self.inspector.inspect_instance(fake_instance, None) self.assertEqual(128, stats.memory_usage) def test_inspect_vnics(self): fake_instance = { 'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} vif_rec = { 'uuid': 'vif_uuid', 'MAC': 'vif_mac', 'device': '0', } bandwidth_returns = [{ '10': { '0': { 'bw_in': 1024, 'bw_out': 2048 } } }] session = self.inspector.session with mock.patch.object(session.VM, 'get_by_name_label') as mock_name, \ mock.patch.object(session.VM, 'get_domid') as mock_domid, \ mock.patch.object(session.VM, 'get_VIFs') as mock_vif, \ mock.patch.object(session.VIF, 'get_record') as mock_record, \ mock.patch.object(session, 'call_plugin_serialized') \ as mock_plugin: mock_name.return_value = ['vm_ref'] mock_domid.return_value = '10' mock_vif.return_value = ['vif_ref'] mock_record.return_value = vif_rec mock_plugin.side_effect = bandwidth_returns interfaces = list(self.inspector.inspect_vnics( fake_instance, None)) self.assertEqual(1, len(interfaces)) vnic0 = interfaces[0] self.assertEqual('vif_uuid', vnic0.name) self.assertEqual('vif_mac', vnic0.mac) self.assertEqual(1024, vnic0.rx_bytes) self.assertEqual(2048, vnic0.tx_bytes) def test_inspect_vnic_rates(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} vif_rec = { 'metrics': 'vif_metrics_ref', 'uuid': 'vif_uuid', 'MAC': 'vif_mac', 'device': 
'0', } session = self.inspector.session with mock.patch.object(session.VM, 'get_by_name_label') as mock_name, \ mock.patch.object(session.VM, 'get_VIFs') as mock_vif, \ mock.patch.object(session.VIF, 'get_record') as mock_record, \ mock.patch.object(session.VM, 'query_data_source') \ as mock_query: mock_name.return_value = ['vm_ref'] mock_vif.return_value = ['vif_ref'] mock_record.return_value = vif_rec mock_query.side_effect = [1024.0, 2048.0] interfaces = list(self.inspector.inspect_vnic_rates( fake_instance, None)) self.assertEqual(1, len(interfaces)) vnic0 = interfaces[0] self.assertEqual('vif_uuid', vnic0.name) self.assertEqual('vif_mac', vnic0.mac) self.assertEqual(1024.0, vnic0.rx_bytes_rate) self.assertEqual(2048.0, vnic0.tx_bytes_rate) def test_inspect_disk_rates(self): fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', 'id': 'fake_instance_id'} vbd_rec = { 'device': 'xvdd' } session = self.inspector.session with mock.patch.object(session.VM, 'get_by_name_label') as mock_name, \ mock.patch.object(session.VM, 'get_VBDs') as mock_vbds, \ mock.patch.object(session.VBD, 'get_record') as mock_records, \ mock.patch.object(session.VM, 'query_data_source') \ as mock_query: mock_name.return_value = ['vm_ref'] mock_vbds.return_value = ['vbd_ref'] mock_records.return_value = vbd_rec mock_query.side_effect = [1024.0, 2048.0] disks = list(self.inspector.inspect_disk_rates( fake_instance, None)) self.assertEqual(1, len(disks)) disk0 = disks[0] self.assertEqual('xvdd', disk0.device) self.assertEqual(1024.0, disk0.read_bytes_rate) self.assertEqual(2048.0, disk0.write_bytes_rate) ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/__init__.py0000666000175100017510000000000013236733243024764 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/vmware/0000775000175100017510000000000013236733440024163 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py0000666000175100017510000001631113236733243031520 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
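# The tests below never open a real vSphere connection: the
# VMwareAPISession is created with create_session=False and its _vim
# attribute is replaced with a MagicMock.  Each test then installs a
# side_effect callable for RetrievePropertiesEx or QueryPerf that both
# asserts the request it receives (property paths, maxObjects, query
# spec) and returns canned managed-object results.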
import mock from oslo_vmware import api from oslotest import base from ceilometer.compute.virt.vmware import vsphere_operations class VsphereOperationsTest(base.BaseTestCase): def setUp(self): api_session = api.VMwareAPISession("test_server", "test_user", "test_password", 0, None, create_session=False) api_session._vim = mock.MagicMock() self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, 1000) super(VsphereOperationsTest, self).setUp() def test_get_vm_object(self): vm1_moid = "vm-1" vm2_moid = "vm-2" vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" def construct_mock_vm_object(vm_moid, vm_instance): vm_object = mock.MagicMock() vm_object.obj.value = vm_moid vm_object.obj._type = "VirtualMachine" vm_object.propSet[0].val = vm_instance return vm_object def retrieve_props_side_effect(pc, specSet, options, skip_op_id=False): # assert inputs self.assertEqual(self._vsphere_ops._max_objects, options.maxObjects) self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, specSet[0].pathSet[0]) # mock return result vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) result = mock.MagicMock() result.objects.__iter__.return_value = [vm1, vm2] return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect vim_mock.ContinueRetrievePropertiesEx.return_value = None vm_object = self._vsphere_ops.get_vm_mobj(vm1_instance) self.assertEqual(vm1_moid, vm_object.value) self.assertEqual("VirtualMachine", vm_object._type) vm_object = self._vsphere_ops.get_vm_mobj(vm2_instance) self.assertEqual(vm2_moid, vm_object.value) self.assertEqual("VirtualMachine", vm_object._type) def test_query_vm_property(self): vm_object = mock.MagicMock() vm_object.value = "vm-21" vm_property_name = "runtime.powerState" vm_property_val = "poweredON" def retrieve_props_side_effect(pc, specSet, options, skip_op_id=False): # assert inputs self.assertEqual(vm_object.value, specSet[0].obj.value) self.assertEqual(vm_property_name, specSet[0].pathSet[0]) # mock return result result = mock.MagicMock() result.objects[0].propSet[0].val = vm_property_val return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect actual_val = self._vsphere_ops.query_vm_property(vm_object, vm_property_name) self.assertEqual(vm_property_val, actual_val) def test_get_perf_counter_id(self): def construct_mock_counter_info(group_name, counter_name, rollup_type, counter_id): counter_info = mock.MagicMock() counter_info.groupInfo.key = group_name counter_info.nameInfo.key = counter_name counter_info.rollupType = rollup_type counter_info.key = counter_id return counter_info def retrieve_props_side_effect(pc, specSet, options, skip_op_id=False): # assert inputs self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, specSet[0].pathSet[0]) # mock return result counter_info1 = construct_mock_counter_info("a", "b", "c", 1) counter_info2 = construct_mock_counter_info("x", "y", "z", 2) result = mock.MagicMock() (result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. 
return_value) = [counter_info1, counter_info2] return result vim_mock = self._vsphere_ops._api_session._vim vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") self.assertEqual(1, counter_id) counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") self.assertEqual(2, counter_id) def test_query_vm_stats(self): vm_object = mock.MagicMock() vm_object.value = "vm-21" device1 = "device-1" device2 = "device-2" device3 = "device-3" counter_id = 5 def construct_mock_metric_series(device_name, stat_values): metric_series = mock.MagicMock() metric_series.value = stat_values metric_series.id.instance = device_name return metric_series def vim_query_perf_side_effect(perf_manager, querySpec): # assert inputs self.assertEqual(vm_object.value, querySpec[0].entity.value) self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, querySpec[0].intervalId) # mock return result perf_stats = mock.MagicMock() perf_stats[0].sampleInfo = ["s1", "s2", "s3"] perf_stats[0].value.__iter__.return_value = [ construct_mock_metric_series(None, [111, 222, 333]), construct_mock_metric_series(device1, [100, 200, 300]), construct_mock_metric_series(device2, [10, 20, 30]), construct_mock_metric_series(device3, [1, 2, 3]) ] return perf_stats vim_mock = self._vsphere_ops._api_session._vim vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect ops = self._vsphere_ops # test aggregate stat stat_val = ops.query_vm_aggregate_stats(vm_object, counter_id, 60) self.assertEqual(222, stat_val) # test per-device(non-aggregate) stats expected_device_stats = { device1: 200, device2: 20, device3: 2 } stats = ops.query_vm_device_stats(vm_object, counter_id, 60) self.assertEqual(expected_device_stats, stats) ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/vmware/__init__.py0000666000175100017510000000000013236733243026265 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py0000666000175100017510000001736513236733243027621 0ustar zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for VMware vSphere inspector. 
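These tests feed canned counter values through a mocked operations layer
and check the inspector's unit conversions: KB to MB for memory usage,
hundredths of a percent to percent for CPU utilisation, and KB/s to B/s
for network and disk byte rates.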
""" import mock from oslo_vmware import api from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.vmware import inspector as vsphere_inspector from ceilometer import service class TestVsphereInspection(base.BaseTestCase): def setUp(self): super(TestVsphereInspection, self).setUp() conf = service.prepare_service([], []) api_session = api.VMwareAPISession("test_server", "test_user", "test_password", 0, None, create_session=False, port=7443) vsphere_inspector.get_api_session = mock.Mock( return_value=api_session) self._inspector = vsphere_inspector.VsphereInspector(conf) self._inspector._ops = mock.MagicMock() def test_instance_notFound(self): test_vm_mobj = mock.MagicMock() test_vm_mobj = None ops_mock = self._inspector._ops ops_mock.get_vm_mobj.return_value = test_vm_mobj self.assertRaises(virt_inspector.InstanceNotFoundException, self._inspector._get_vm_mobj_not_power_off_or_raise, mock.MagicMock()) def test_instance_poweredOff(self): test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" test_vm_mobj_powerState = "poweredOff" ops_mock = self._inspector._ops ops_mock.get_vm_mobj.return_value = test_vm_mobj ops_mock.query_vm_property.return_value = test_vm_mobj_powerState self.assertRaises(virt_inspector.InstanceShutOffException, self._inspector._get_vm_mobj_not_power_off_or_raise, mock.MagicMock()) def test_instance_poweredOn(self): test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" test_vm_mobj_powerState = "poweredOn" ops_mock = self._inspector._ops ops_mock.get_vm_mobj.return_value = test_vm_mobj ops_mock.query_vm_property.return_value = test_vm_mobj_powerState vm_mobj = self._inspector._get_vm_mobj_not_power_off_or_raise( mock.MagicMock()) self.assertEqual(test_vm_mobj.value, vm_mobj.value) def test_inspect_memory_usage(self): test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" fake_perf_counter_id = 'fake_perf_counter_id' fake_memory_value = 1024.0 self._inspector._get_vm_mobj_not_power_off_or_raise = mock.MagicMock() self._inspector._get_vm_mobj_not_power_off_or_raise.return_value = ( test_vm_mobj) ops_mock = self._inspector._ops ops_mock.get_perf_counter_id.return_value = fake_perf_counter_id ops_mock.query_vm_aggregate_stats.return_value = fake_memory_value stats = self._inspector.inspect_instance(mock.MagicMock(), None) self.assertEqual(1.0, stats.memory_usage) def test_inspect_cpu_util(self): test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" fake_perf_counter_id = 'fake_perf_counter_id' fake_cpu_util_value = 60.0 self._inspector._get_vm_mobj_not_power_off_or_raise = mock.MagicMock() self._inspector._get_vm_mobj_not_power_off_or_raise.return_value = ( test_vm_mobj) ops_mock = self._inspector._ops ops_mock.get_perf_counter_id.return_value = fake_perf_counter_id (ops_mock.query_vm_aggregate_stats. 
return_value) = fake_cpu_util_value * 100 stats = self._inspector.inspect_instance(mock.MagicMock(), None) self.assertEqual(60.0, stats.cpu_util) def test_inspect_vnic_rates(self): # construct test data test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" vnic1 = "vnic-1" vnic2 = "vnic-2" counter_name_to_id_map = { vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 } counter_id_to_stats_map = { 1: {vnic1: 1, vnic2: 3}, 2: {vnic1: 2, vnic2: 4}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_mobj, counter_id, duration): # assert inputs self.assertEqual(test_vm_mobj.value, vm_mobj.value) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] self._inspector._get_vm_mobj_not_power_off_or_raise = mock.MagicMock() self._inspector._get_vm_mobj_not_power_off_or_raise.return_value = ( test_vm_mobj) # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = list(self._inspector.inspect_vnic_rates( mock.MagicMock(), None)) self.assertEqual(1024.0, result[0].rx_bytes_rate) self.assertEqual(2048.0, result[0].tx_bytes_rate) self.assertEqual(3072.0, result[1].rx_bytes_rate) self.assertEqual(4096.0, result[1].tx_bytes_rate) def test_inspect_disk_rates(self): # construct test data test_vm_mobj = mock.MagicMock() test_vm_mobj.value = "vm-21" disk1 = "disk-1" disk2 = "disk-2" counter_name_to_id_map = { vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 } counter_id_to_stats_map = { 1: {disk1: 1, disk2: 2}, 2: {disk1: 300, disk2: 400}, 3: {disk1: 5, disk2: 6}, 4: {disk1: 700}, } def get_counter_id_side_effect(counter_full_name): return counter_name_to_id_map[counter_full_name] def query_stat_side_effect(vm_mobj, counter_id, duration): # assert inputs self.assertEqual(test_vm_mobj.value, vm_mobj.value) self.assertIn(counter_id, counter_id_to_stats_map) return counter_id_to_stats_map[counter_id] self._inspector._get_vm_mobj_not_power_off_or_raise = mock.MagicMock() self._inspector._get_vm_mobj_not_power_off_or_raise.return_value = ( test_vm_mobj) # configure vsphere operations mock with the test data ops_mock = self._inspector._ops ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect result = self._inspector.inspect_disk_rates(mock.MagicMock(), None) # validate result expected_stats = { disk1: virt_inspector.DiskRateStats(disk1, 1024, 300, 5120, 700), disk2: virt_inspector.DiskRateStats(disk2, 2048, 400, 6144, 0) } actual_stats = dict((stats.device, stats) for stats in result) self.assertEqual(expected_stats, actual_stats) ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/libvirt/0000775000175100017510000000000013236733440024335 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/libvirt/__init__.py0000666000175100017510000000000013236733243026437 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py0000777000175100017510000005542713236733243027777 0ustar zuulzuul00000000000000# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for libvirt inspector. """ import fixtures import mock from oslo_utils import units from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector from ceilometer.compute.virt.libvirt import utils from ceilometer import service class FakeLibvirtError(Exception): pass class VMInstance(object): id = 'ff58e738-12f4-4c58-acde-77617b68da56' name = 'instance-00000001' class TestLibvirtInspection(base.BaseTestCase): def setUp(self): super(TestLibvirtInspection, self).setUp() conf = service.prepare_service([], []) self.instance = VMInstance() libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.getVersion.return_value = 5001001 libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=None): self.inspector = libvirt_inspector.LibvirtInspector(conf) def test_inspect_instance_stats(self): domain = mock.Mock() domain.info.return_value = (0, 0, 0, 2, 999999) domain.memoryStats.return_value = {'available': 51200, 'unused': 25600, 'rss': 30000, 'swap_in': 5120, 'swap_out': 8192} conn = mock.Mock() conn.lookupByUUIDString.return_value = domain conn.domainListGetStats.return_value = [({}, { 'cpu.time': 999999, 'vcpu.maximum': 4, 'vcpu.current': 2, 'vcpu.0.time': 10000, 'vcpu.0.wait': 10000, 'vcpu.2.time': 10000, 'vcpu.2.wait': 10000, 'perf.cmt': 90112, 'perf.cpu_cycles': 7259361, 'perf.instructions': 8815623, 'perf.cache_references': 74184, 'perf.cache_misses': 16737, 'perf.mbmt': 1892352, 'perf.mbml': 1802240})] with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertEqual(2, stats.cpu_number) self.assertEqual(40000, stats.cpu_time) self.assertEqual(90112, stats.cpu_l3_cache_usage) self.assertEqual(25600 / units.Ki, stats.memory_usage) self.assertEqual(30000 / units.Ki, stats.memory_resident) self.assertEqual(5120 / units.Ki, stats.memory_swap_in) self.assertEqual(8192 / units.Ki, stats.memory_swap_out) self.assertEqual(1892352, stats.memory_bandwidth_total) self.assertEqual(1802240, stats.memory_bandwidth_local) self.assertEqual(7259361, stats.cpu_cycles) self.assertEqual(8815623, stats.instructions) self.assertEqual(74184, stats.cache_references) self.assertEqual(16737, stats.cache_misses) def test_inspect_instance_stats_fallback_cpu_time(self): domain = mock.Mock() domain.info.return_value = (0, 0, 0, 2, 20000) domain.memoryStats.return_value = {'available': 51200, 'unused': 25600, 'rss': 30000} conn = mock.Mock() conn.lookupByUUIDString.return_value = domain conn.domainListGetStats.return_value = [({}, { 'vcpu.current': 2, 'vcpu.maximum': 4, 'vcpu.0.time': 10000, 'vcpu.1.time': 10000, 'cpu.time': 999999})] with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance) self.assertEqual(2, stats.cpu_number) self.assertEqual(999999, stats.cpu_time) def test_inspect_cpus_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_instance, self.instance, None) def test_inspect_vnics(self): dom_xml = """
""" interface_stats = { 'vnet0': (1, 2, 21, 22, 3, 4, 23, 24), 'vnet1': (5, 6, 25, 26, 7, 8, 27, 28), 'vnet2': (9, 10, 29, 30, 11, 12, 31, 32), } interfaceStats = interface_stats.__getitem__ domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.interfaceStats.side_effect = interfaceStats conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): interfaces = list(self.inspector.inspect_vnics( self.instance, None)) self.assertEqual(3, len(interfaces)) vnic0 = interfaces[0] self.assertEqual('vnet0', vnic0.name) self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) self.assertEqual(1, vnic0.rx_bytes) self.assertEqual(2, vnic0.rx_packets) self.assertEqual(3, vnic0.tx_bytes) self.assertEqual(4, vnic0.tx_packets) self.assertEqual(21, vnic0.rx_errors) self.assertEqual(22, vnic0.rx_drop) self.assertEqual(23, vnic0.tx_errors) self.assertEqual(24, vnic0.tx_drop) vnic1 = interfaces[1] self.assertEqual('vnet1', vnic1.name) self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) self.assertEqual(5, vnic1.rx_bytes) self.assertEqual(6, vnic1.rx_packets) self.assertEqual(7, vnic1.tx_bytes) self.assertEqual(8, vnic1.tx_packets) self.assertEqual(25, vnic1.rx_errors) self.assertEqual(26, vnic1.rx_drop) self.assertEqual(27, vnic1.tx_errors) self.assertEqual(28, vnic1.tx_drop) vnic2 = interfaces[2] self.assertEqual('vnet2', vnic2.name) self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) self.assertIsNone(vnic2.fref) self.assertEqual(dict(), vnic2.parameters) self.assertEqual(9, vnic2.rx_bytes) self.assertEqual(10, vnic2.rx_packets) self.assertEqual(11, vnic2.tx_bytes) self.assertEqual(12, vnic2.tx_packets) self.assertEqual(29, vnic2.rx_errors) self.assertEqual(30, vnic2.rx_drop) self.assertEqual(31, vnic2.tx_errors) self.assertEqual(32, vnic2.tx_drop) def test_inspect_vnics_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_vnics self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance, None)) def test_inspect_disks(self): dom_xml = """
""" blockStatsFlags = {'wr_total_times': 91752302267, 'rd_operations': 6756, 'flush_total_times': 1310427331, 'rd_total_times': 29142253616, 'rd_bytes': 171460096, 'flush_operations': 746, 'wr_operations': 1437, 'wr_bytes': 13574656} domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.blockStats.return_value = (1, 2, 3, 4, -1) domain.blockStatsFlags.return_value = blockStatsFlags conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disks(self.instance, None)) self.assertEqual(1, len(disks)) self.assertEqual('vda', disks[0].device) self.assertEqual(1, disks[0].read_requests) self.assertEqual(2, disks[0].read_bytes) self.assertEqual(3, disks[0].write_requests) self.assertEqual(4, disks[0].write_bytes) self.assertEqual(91752302267, disks[0].wr_total_times) self.assertEqual(29142253616, disks[0].rd_total_times) def test_inspect_disks_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_disks self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance, None)) def test_inspect_disk_info(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info( self.instance, None)) self.assertEqual(1, len(disks)) self.assertEqual('vda', disks[0].device) self.assertEqual(1, disks[0].capacity) self.assertEqual(2, disks[0].allocation) self.assertEqual(3, disks[0].physical) def test_inspect_disk_info_network_type(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance, None)) self.assertEqual(1, len(disks)) def test_inspect_disk_info_without_source_element(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance, None)) self.assertEqual(0, len(disks)) def test_inspect_disks_without_source_element(self): dom_xml = """
""" blockStatsFlags = {'wr_total_times': 91752302267, 'rd_operations': 6756, 'flush_total_times': 1310427331, 'rd_total_times': 29142253616, 'rd_bytes': 171460096, 'flush_operations': 746, 'wr_operations': 1437, 'wr_bytes': 13574656} domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.blockStats.return_value = (1, 2, 3, 4, -1) domain.blockStatsFlags.return_value = blockStatsFlags conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disks(self.instance, None)) self.assertEqual(0, len(disks)) def test_inspect_memory_usage_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 51200, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_instance, self.instance, None) def test_inspect_memory_with_empty_stats(self): domain = mock.Mock() domain.info.return_value = (0, 0, 51200, 2, 999999) domain.memoryStats.return_value = {} conn = mock.Mock() conn.domainListGetStats.return_value = [({}, {})] conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertIsNone(stats.memory_usage) self.assertIsNone(stats.memory_resident) self.assertIsNone(stats.memory_swap_in) self.assertIsNone(stats.memory_swap_out) def test_inspect_perf_events_libvirt_less_than_2_3_0(self): domain = mock.Mock() domain.info.return_value = (0, 0, 51200, 2, 999999) domain.memoryStats.return_value = {'rss': 0, 'available': 51200, 'unused': 25600} conn = mock.Mock() conn.domainListGetStats.return_value = [({}, {})] conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertIsNone(stats.cpu_l3_cache_usage) self.assertIsNone(stats.memory_bandwidth_total) self.assertIsNone(stats.memory_bandwidth_local) self.assertIsNone(stats.cpu_cycles) self.assertIsNone(stats.instructions) self.assertIsNone(stats.cache_references) self.assertIsNone(stats.cache_misses) class TestLibvirtInspectionWithError(base.BaseTestCase): def setUp(self): super(TestLibvirtInspectionWithError, self).setUp() conf = service.prepare_service([], []) self.useFixture(fixtures.MonkeyPatch( 'ceilometer.compute.virt.libvirt.utils.' 
'refresh_libvirt_connection', mock.MagicMock(side_effect=[None, Exception('dummy')]))) libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt self.inspector = libvirt_inspector.LibvirtInspector(conf) def test_inspect_unknown_error(self): self.assertRaises(virt_inspector.InspectorException, self.inspector.inspect_instance, 'foo', None) ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/hyperv/0000775000175100017510000000000013236733440024177 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/hyperv/__init__.py0000666000175100017510000000000013236733243026301 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py0000666000175100017510000001672013236733243027627 0ustar zuulzuul00000000000000# Copyright 2013 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Hyper-V inspector. """ import sys import mock from os_win import exceptions as os_win_exc from oslo_utils import units from oslotest import base from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector from ceilometer.compute.virt import inspector as virt_inspector from ceilometer import service class TestHyperVInspection(base.BaseTestCase): @mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock()) @mock.patch.object(hyperv_inspector.HyperVInspector, '_compute_host_max_cpu_clock') def setUp(self, mock_compute_host_cpu_clock): conf = service.prepare_service([], []) self._inspector = hyperv_inspector.HyperVInspector(conf) self._inspector._utils = mock.MagicMock() super(TestHyperVInspection, self).setUp() def test_converted_exception(self): self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.OSWinException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_instance, mock.sentinel.instance, None) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.HyperVException) self.assertRaises(virt_inspector.InspectorException, self._inspector.inspect_instance, mock.sentinel.instance, None) self._inspector._utils.get_cpu_metrics.side_effect = ( os_win_exc.NotFound(resource='foofoo')) self.assertRaises(virt_inspector.InstanceNotFoundException, self._inspector.inspect_instance, mock.sentinel.instance, None) def test_assert_original_traceback_maintained(self): def bar(self): foo = "foofoo" raise os_win_exc.NotFound(resource=foo) self._inspector._utils.get_cpu_metrics.side_effect = bar try: self._inspector.inspect_instance(mock.sentinel.instance, None) self.fail("Test expected exception, but it was not raised.") except virt_inspector.InstanceNotFoundException: # exception has been raised as expected. _, _, trace = sys.exc_info() while trace.tb_next: # iterate until the original exception source, bar. trace = trace.tb_next # original frame will contain the 'foo' variable. 
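                # If the converted exception were re-raised with a fresh
                # traceback, the original frame (and its 'foo' local)
                # would be lost.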
self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) @mock.patch.object(hyperv_inspector, 'utilsfactory') def test_compute_host_max_cpu_clock(self, mock_utilsfactory): mock_cpu = {'MaxClockSpeed': 1000} hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info hostutils.return_value = [mock_cpu, mock_cpu] cpu_clock = self._inspector._compute_host_max_cpu_clock() self.assertEqual(2000.0, cpu_clock) def test_inspect_instance(self): fake_instance_name = 'fake_instance_name' fake_cpu_clock_used = 2000 fake_cpu_count = 3000 fake_uptime = 4000 self._inspector._host_max_cpu_clock = 4000.0 fake_cpu_percent_used = (fake_cpu_clock_used / self._inspector._host_max_cpu_clock) fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * 1000) self._inspector._utils.get_cpu_metrics.return_value = ( fake_cpu_clock_used, fake_cpu_count, fake_uptime) fake_usage = self._inspector._utils.get_memory_metrics.return_value stats = self._inspector.inspect_instance(fake_instance_name, None) self.assertEqual(fake_cpu_count, stats.cpu_number) self.assertEqual(fake_cpu_time, stats.cpu_time) self.assertEqual(fake_usage, stats.memory_usage) def test_inspect_vnics(self): fake_instance_name = 'fake_instance_name' fake_rx_mb = 1000 fake_tx_mb = 2000 fake_element_name = 'fake_element_name' fake_address = 'fake_address' self._inspector._utils.get_vnic_metrics.return_value = [{ 'rx_mb': fake_rx_mb, 'tx_mb': fake_tx_mb, 'element_name': fake_element_name, 'address': fake_address}] inspected_vnics = list(self._inspector.inspect_vnics( fake_instance_name, None)) self.assertEqual(1, len(inspected_vnics)) inspected_stats = inspected_vnics[0] self.assertEqual(fake_element_name, inspected_stats.name) self.assertEqual(fake_address, inspected_stats.mac) self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) def test_inspect_disks(self): fake_instance_name = 'fake_instance_name' fake_read_mb = 1000 fake_write_mb = 2000 fake_instance_id = "fake_fake_instance_id" fake_host_resource = "fake_host_resource" self._inspector._utils.get_disk_metrics.return_value = [{ 'read_mb': fake_read_mb, 'write_mb': fake_write_mb, 'instance_id': fake_instance_id, 'host_resource': fake_host_resource}] inspected_disks = list(self._inspector.inspect_disks( fake_instance_name, None)) self.assertEqual(1, len(inspected_disks)) inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_stats.device) self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes) self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) def test_inspect_disk_latency(self): fake_instance_name = mock.sentinel.INSTANCE_NAME fake_disk_latency = 1000 fake_instance_id = mock.sentinel.INSTANCE_ID self._inspector._utils.get_disk_latency_metrics.return_value = [{ 'disk_latency': fake_disk_latency, 'instance_id': fake_instance_id}] inspected_disks = list(self._inspector.inspect_disk_latency( fake_instance_name, None)) self.assertEqual(1, len(inspected_disks)) inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_stats.device) self.assertEqual(1, inspected_stats.disk_latency) def test_inspect_disk_iops_count(self): fake_instance_name = mock.sentinel.INSTANCE_NAME fake_disk_iops_count = 53 fake_instance_id = mock.sentinel.INSTANCE_ID self._inspector._utils.get_disk_iops_count.return_value = [{ 'iops_count': fake_disk_iops_count, 'instance_id': fake_instance_id}] inspected_disks = list(self._inspector.inspect_disk_iops( 
fake_instance_name, None)) self.assertEqual(1, len(inspected_disks)) inspected_stats = inspected_disks[0] self.assertEqual(fake_instance_id, inspected_stats.device) self.assertEqual(53, inspected_stats.iops_count) ceilometer-10.0.0/ceilometer/tests/unit/compute/__init__.py0000666000175100017510000000000013236733243024000 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/test_neutronclient.py0000666000175100017510000001727613236733243024544 0ustar zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslotest import base from ceilometer import neutron_client from ceilometer import service class TestNeutronClient(base.BaseTestCase): def setUp(self): super(TestNeutronClient, self).setUp() self.CONF = service.prepare_service([], []) self.nc = neutron_client.Client(self.CONF) self.nc.lb_version = 'v1' @staticmethod def fake_ports_list(): return {'ports': [{'admin_state_up': True, 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'device_owner': 'network:router_gateway', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, ]} def test_port_get_all(self): with mock.patch.object(self.nc.client, 'list_ports', side_effect=self.fake_ports_list): ports = self.nc.port_get_all() self.assertEqual(1, len(ports)) self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', ports[0]['id']) @staticmethod def fake_networks_list(): return {'networks': [{'admin_state_up': True, 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'name': 'public', 'provider:network_type': 'gre', 'provider:physical_network': None, 'provider:segmentation_id': 2, 'router:external': True, 'shared': False, 'status': 'ACTIVE', 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, ]} @staticmethod def fake_pool_list(): return {'pools': [{'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'mylb', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'health_monitors_status': []}, ]} def test_pool_list(self): with mock.patch.object(self.nc.client, 'list_pools', side_effect=self.fake_pool_list): pools = self.nc.pool_get_all() self.assertEqual(1, len(pools)) self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', pools[0]['id']) @staticmethod def fake_vip_list(): return {'vips': [{'status': 'ACTIVE', 'status_description': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'connection_limit': -1, 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 
'session_persistence': None, 'address': '10.0.0.2', 'protocol_port': 80, 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', 'name': 'myvip'}, ]} def test_vip_list(self): with mock.patch.object(self.nc.client, 'list_vips', side_effect=self.fake_vip_list): vips = self.nc.vip_get_all() self.assertEqual(1, len(vips)) self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', vips[0]['id']) @staticmethod def fake_member_list(): return {'members': [{'status': 'ACTIVE', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', 'address': '10.0.0.3', 'status_description': None, 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, ]} def test_member_list(self): with mock.patch.object(self.nc.client, 'list_members', side_effect=self.fake_member_list): members = self.nc.member_get_all() self.assertEqual(1, len(members)) self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', members[0]['id']) @staticmethod def fake_monitors_list(): return {'health_monitors': [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', 'admin_state_up': True, 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", 'delay': 2, 'max_retries': 5, 'timeout': 5, 'pools': [], 'type': 'PING', }]} def test_monitor_list(self): with mock.patch.object(self.nc.client, 'list_health_monitors', side_effect=self.fake_monitors_list): monitors = self.nc.health_monitor_get_all() self.assertEqual(1, len(monitors)) self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', monitors[0]['id']) @staticmethod def fake_pool_stats(fake_pool): return {'stats': [{'active_connections': 1, 'total_connections': 2, 'bytes_in': 3, 'bytes_out': 4 }]} def test_pool_stats(self): with mock.patch.object(self.nc.client, 'retrieve_pool_stats', side_effect=self.fake_pool_stats): stats = self.nc.pool_stats('fake_pool')['stats'] self.assertEqual(1, len(stats)) self.assertEqual(1, stats[0]['active_connections']) self.assertEqual(2, stats[0]['total_connections']) self.assertEqual(3, stats[0]['bytes_in']) self.assertEqual(4, stats[0]['bytes_out']) def test_v1_list_loadbalancer_returns_empty_list(self): self.assertEqual([], self.nc.list_loadbalancer()) def test_v1_list_listener_returns_empty_list(self): self.assertEqual([], self.nc.list_listener()) ceilometer-10.0.0/ceilometer/tests/unit/test_novaclient.py0000666000175100017510000002227513236733243024010 0ustar zuulzuul00000000000000# Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
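# The tests below exercise nova_client.Client, which decorates the raw
# server records returned by novaclient with flavor and image details
# fetched through novaclient/glanceclient.  Lookups that raise NotFound
# are mapped to synthetic 'unknown-id-<id>' names, and the test's
# _flavors_count/_images_count counters verify that repeated flavor and
# image lookups are served from a cache within a single call.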
import fixtures import glanceclient import mock import novaclient from oslotest import base from ceilometer import nova_client from ceilometer import service class TestNovaClient(base.BaseTestCase): def setUp(self): super(TestNovaClient, self).setUp() self.CONF = service.prepare_service([], []) self._flavors_count = 0 self._images_count = 0 self.nv = nova_client.Client(self.CONF) self.useFixture(fixtures.MockPatchObject( self.nv.nova_client.flavors, 'get', side_effect=self.fake_flavors_get)) self.useFixture(fixtures.MockPatchObject( self.nv.glance_client.images, 'get', side_effect=self.fake_images_get)) def fake_flavors_get(self, *args, **kwargs): self._flavors_count += 1 a = mock.MagicMock() a.id = args[0] if a.id == 1: a.name = 'm1.tiny' elif a.id == 2: a.name = 'm1.large' else: raise novaclient.exceptions.NotFound('foobar') return a def fake_images_get(self, *args, **kwargs): self._images_count += 1 a = mock.MagicMock() a.id = args[0] image_details = { 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), 3: ('rhel-6-x64', None), 4: ('rhel-6-x64', dict()), 5: ('rhel-6-x64', dict(kernel_id=11)), 6: ('rhel-6-x64', dict(ramdisk_id=21)) } if a.id in image_details: a.name = image_details[a.id][0] a.metadata = image_details[a.id][1] else: raise glanceclient.exc.HTTPNotFound('foobar') return a @staticmethod def fake_servers_list(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 1} b = mock.MagicMock() b.id = 43 b.flavor = {'id': 2} b.image = {'id': 2} return [a, b] def test_instance_get_all_by_host(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(2, len(instances)) self.assertEqual('m1.tiny', instances[0].flavor['name']) self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) self.assertEqual(11, instances[0].kernel_id) self.assertEqual(21, instances[0].ramdisk_id) def test_instance_get_all(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all() self.assertEqual(2, len(instances)) self.assertEqual(42, instances[0].id) self.assertEqual(1, instances[0].flavor['id']) self.assertEqual(1, instances[0].image['id']) @staticmethod def fake_servers_list_unknown_flavor(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = {'id': 1} return [a] def test_instance_get_all_by_host_unknown_flavor(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_flavor): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].flavor['name']) @staticmethod def fake_servers_list_unknown_image(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 666} return [a] @staticmethod def fake_servers_list_image_missing_metadata(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': args[0]} return [a] @staticmethod def fake_instance_image_missing(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = None return [a] def test_instance_get_all_by_host_unknown_image(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_image): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, 
len(instances)) self.assertEqual('unknown-id-666', instances[0].image['name']) def test_with_flavor_and_image(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) instance = results[0] self.assertEqual(2, len(results)) self.assertEqual('ubuntu-12.04-x86', instance.image['name']) self.assertEqual('m1.tiny', instance.flavor['name']) self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_unknown_image(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_unknown_flavor(self): instances = self.fake_servers_list_unknown_flavor() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.flavor['name']) self.assertEqual(0, instance.flavor['vcpus']) self.assertEqual(0, instance.flavor['ram']) self.assertEqual(0, instance.flavor['disk']) self.assertNotEqual(instance.image['name'], 'unknown-id-666') self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_none_metadata(self): instances = self.fake_servers_list_image_missing_metadata(3) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_metadata(self): instances = self.fake_servers_list_image_missing_metadata(4) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_ramdisk(self): instances = self.fake_servers_list_image_missing_metadata(5) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual(11, instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_kernel(self): instances = self.fake_servers_list_image_missing_metadata(6) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_no_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) self.assertEqual(2, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) self.assertEqual(4, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_unknown_image_cache(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances * 2) self.assertEqual(2, len(results)) self.assertEqual(1, self._flavors_count) self.assertEqual(1, self._images_count) for instance in results: self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_missing_image_instance(self): instances = self.fake_instance_image_missing() results = self.nv._with_flavor_and_image(instances) instance = results[0] 
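        # A server whose image attribute is None (e.g. booted from a
        # volume) should come back with image, kernel_id and ramdisk_id
        # all set to None instead of raising.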
self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.image) self.assertIsNone(instance.ramdisk_id) ceilometer-10.0.0/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py0000666000175100017510000003443513236733243026311 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutronclient.v2_0 import client from oslotest import base from ceilometer import neutron_client from ceilometer import service class TestNeutronClientLBaaSV2(base.BaseTestCase): def setUp(self): super(TestNeutronClientLBaaSV2, self).setUp() conf = service.prepare_service([], []) self.nc = neutron_client.Client(conf) @staticmethod def fake_list_lbaas_pools(): return { 'pools': [{ 'lb_algorithm': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': 'simple pool', 'admin_state_up': True, 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'healthmonitor_id': None, 'listeners': [{ 'id': "35cb8516-1173-4035-8dae-0dae3453f37f" } ], 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858'} ], 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'name': 'pool1' }] } @staticmethod def fake_list_lbaas_members(): return { 'members': [{ 'weight': 1, 'admin_state_up': True, 'subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', 'address': '10.0.0.8', 'protocol_port': 80, 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858' }] } @staticmethod def fake_list_lbaas_healthmonitors(): return { 'healthmonitors': [{ 'admin_state_up': True, 'tenant_id': '6f3584d5754048a18e30685362b88411', 'delay': 1, 'expected_codes': '200,201,202', 'max_retries': 5, 'http_method': 'GET', 'timeout': 1, 'pools': [{ 'id': '74aa2010-a59f-4d35-a436-60a6da882819' }], 'url_path': '/index.html', 'type': 'HTTP', 'id': '0a9ac99d-0a09-4b18-8499-a0796850279a' }] } @staticmethod def fake_show_listener(): return { 'listener': { 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': '' } } @staticmethod def fake_retrieve_loadbalancer_status(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_retrieve_loadbalancer_status_complex(): return { 'statuses': { 'loadbalancer': { 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'listeners': [{ 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'operating_status': 
'ONLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }, { 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf969', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', 'provisioning_status': 'ACTIVE' } }, { 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfa7a', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4629', 'provisioning_status': 'ACTIVE' } }] }, { 'id': '35cb8516-1173-4035-8dae-0dae3453f48e', 'operating_status': 'OFFLINE', 'provisioning_status': 'ACTIVE', 'pools': [{ 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce7g7', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'members': [{ 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfb8b', 'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE' }], 'healthmonitor': { 'id': '785131d2-8f7b-4fee-a7e7-3196e11b473a', 'provisioning_status': 'ACTIVE' } }] }] } } } @staticmethod def fake_list_lbaas_listeners(): return { 'listeners': [{ 'default_pool_id': None, 'protocol': 'HTTP', 'description': '', 'admin_state_up': True, 'loadbalancers': [{ 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' }], 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', 'connection_limit': 100, 'protocol_port': 80, 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', 'name': 'listener_one' }]} @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_pools_v2(self, mock_status, mock_show, mock_list): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list.return_value = self.fake_list_lbaas_pools() pools = self.nc.list_pools_v2() self.assertEqual(1, len(pools)) for pool in pools: self.assertEqual('ONLINE', pool['status']) self.assertEqual('ROUND_ROBIN', pool['lb_method']) @mock.patch.object(client.Client, 'list_lbaas_pools') @mock.patch.object(client.Client, 'list_lbaas_members') @mock.patch.object(client.Client, 'show_listener') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_members_v2(self, mock_status, mock_show, mock_list_members, mock_list_pools): mock_status.return_value = self.fake_retrieve_loadbalancer_status() mock_show.return_value = self.fake_show_listener() mock_list_pools.return_value = self.fake_list_lbaas_pools() mock_list_members.return_value = self.fake_list_lbaas_members() members = self.nc.list_members_v2() self.assertEqual(1, len(members)) for member in members: self.assertEqual('ONLINE', member['status']) self.assertEqual('4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', member['pool_id']) @mock.patch.object(client.Client, 'list_lbaas_healthmonitors') def test_list_health_monitors_v2(self, mock_list_healthmonitors): mock_list_healthmonitors.return_value = ( self.fake_list_lbaas_healthmonitors()) healthmonitors = self.nc.list_health_monitors_v2() self.assertEqual(1, len(healthmonitors)) for healthmonitor in healthmonitors: self.assertEqual(5, healthmonitor['max_retries']) @mock.patch.object(neutron_client.Client, 
'_retrieve_loadbalancer_status_tree') def test_get_member_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' listener_id = '35cb8516-1173-4035-8dae-0dae3453f37f' pool_id = '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5' parent_id = [listener_id, pool_id] result_status = self.nc._get_member_status(loadbalancer_id, parent_id) expected_keys = ['fcf23bde-8cf9-4616-883f-208cebcbf858', 'fcf23bde-8cf9-4616-883f-208cebcbf969'] excepted_status = { 'fcf23bde-8cf9-4616-883f-208cebcbf858': 'ONLINE', 'fcf23bde-8cf9-4616-883f-208cebcbf969': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_pool_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' parent_id = '35cb8516-1173-4035-8dae-0dae3453f37f' result_status = self.nc._get_pool_status(loadbalancer_id, parent_id) expected_keys = ['4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6'] excepted_status = { '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5': 'ONLINE', '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_get_listener_status(self, mock_status): mock_status.return_value = ( self.fake_retrieve_loadbalancer_status_complex()) loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' result_status = self.nc._get_listener_status(loadbalancer_id) expected_keys = ['35cb8516-1173-4035-8dae-0dae3453f37f', '35cb8516-1173-4035-8dae-0dae3453f48e'] excepted_status = { '35cb8516-1173-4035-8dae-0dae3453f37f': 'ONLINE', '35cb8516-1173-4035-8dae-0dae3453f48e': 'OFFLINE'} for key in result_status.keys(): self.assertIn(key, expected_keys) self.assertEqual(excepted_status[key], result_status[key]) @mock.patch.object(client.Client, 'list_listeners') @mock.patch.object(neutron_client.Client, '_retrieve_loadbalancer_status_tree') def test_list_listener(self, mock_status, mock_list_listeners): mock_list_listeners.return_value = ( self.fake_list_lbaas_listeners()) mock_status.return_value = ( self.fake_retrieve_loadbalancer_status()) listeners = self.nc.list_listener() expected_key = '35cb8516-1173-4035-8dae-0dae3453f37f' expected_status = 'ONLINE' self.assertEqual(1, len(listeners)) self.assertEqual(expected_key, listeners[0]['id']) self.assertEqual(expected_status, listeners[0]['operating_status']) ceilometer-10.0.0/ceilometer/tests/unit/transformer/0000775000175100017510000000000013236733440022564 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/transformer/__init__.py0000666000175100017510000000000013236733243024666 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/transformer/test_conversions.py0000666000175100017510000001066113236733243026554 0ustar zuulzuul00000000000000# # Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from oslo_utils import timeutils from oslotest import base from ceilometer import sample from ceilometer.transformer import conversions class AggregatorTransformerTestCase(base.BaseTestCase): SAMPLE = sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp="2015-10-29 14:12:15.485877+00:00", resource_metadata={} ) def setUp(self): super(AggregatorTransformerTestCase, self).setUp() self._sample_offset = 0 def test_init_input_validation(self): aggregator = conversions.AggregatorTransformer("2", "15", None, None, None) self.assertEqual(2, aggregator.size) self.assertEqual(15, aggregator.retention_time) def test_init_no_size_or_rention_time(self): aggregator = conversions.AggregatorTransformer() self.assertEqual(1, aggregator.size) self.assertIsNone(aggregator.retention_time) def test_init_size_zero(self): aggregator = conversions.AggregatorTransformer(size="0") self.assertEqual(1, aggregator.size) self.assertIsNone(aggregator.retention_time) def test_init_input_validation_size_invalid(self): self.assertRaises(ValueError, conversions.AggregatorTransformer, "abc", "15", None, None, None) def test_init_input_validation_retention_time_invalid(self): self.assertRaises(ValueError, conversions.AggregatorTransformer, "2", "abc", None, None, None) def test_init_no_timestamp(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None) self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_none(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, None) self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_first(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "first") self.assertEqual("first", aggregator.timestamp) def test_init_timestamp_last(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "last") self.assertEqual("last", aggregator.timestamp) def test_init_timestamp_invalid(self): aggregator = conversions.AggregatorTransformer("1", "1", None, None, None, "invalid_option") self.assertEqual("first", aggregator.timestamp) def test_size_unbounded(self): aggregator = conversions.AggregatorTransformer(size="0", retention_time="300") self._insert_sample_data(aggregator) samples = aggregator.flush() self.assertEqual([], samples) def test_size_bounded(self): aggregator = conversions.AggregatorTransformer(size="100") self._insert_sample_data(aggregator) samples = aggregator.flush() self.assertEqual(100, len(samples)) def _insert_sample_data(self, aggregator): for _ in range(100): sample = copy.copy(self.SAMPLE) sample.resource_id = sample.resource_id + str(self._sample_offset) sample.timestamp = datetime.datetime.isoformat(timeutils.utcnow()) aggregator.handle_sample(sample) self._sample_offset += 1 ceilometer-10.0.0/ceilometer/tests/unit/hardware/0000775000175100017510000000000013236733440022017 5ustar 
zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/hardware/pollsters/0000775000175100017510000000000013236733440024046 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/hardware/pollsters/test_util.py0000666000175100017510000000457413236733243026451 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ceilometer.hardware.pollsters import util from ceilometer import sample from ceilometer.tests import base as test_base class TestPollsterUtils(test_base.BaseTestCase): def setUp(self): super(TestPollsterUtils, self).setUp() self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") def test_make_sample(self): s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, res_metadata={ 'metakey': 'metaval', }) self.assertEqual('127.0.0.1', s.resource_id) self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) self.assertIn('metakey', s.resource_metadata.keys()) def test_make_sample_extra(self): extra = { 'project_id': 'project', 'resource_id': 'resource' } s = util.make_sample_from_host(self.host_url, name='test', sample_type=sample.TYPE_GAUGE, unit='B', volume=1, extra=extra) self.assertIsNone(s.user_id) self.assertEqual('project', s.project_id) self.assertEqual('resource', s.resource_id) self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', 'project_id': 'project', 'resource_id': 'resource'}, s.resource_metadata) ceilometer-10.0.0/ceilometer/tests/unit/hardware/pollsters/test_generic.py0000666000175100017510000001627213236733243027106 0ustar zuulzuul00000000000000# # Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
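# NOTE: illustrative sketch only -- the declarative pollster exercised below is
# driven by YAML meter definitions of roughly this shape (the OID and lambda are
# the same fake values used by these tests, not a recommended production
# configuration):
#
#     metric:
#         - name: hardware.test1
#           type: gauge
#           unit: process
#           snmp_inspector:
#               matching_type: type_exact
#               oid: "1.3.6.1.4.1.2021.10.1.3.1"
#               type: "lambda x: float(str(x))"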
import mock import six import yaml import fixtures from oslo_utils import fileutils from ceilometer import declarative from ceilometer.hardware.inspector import base as inspector_base from ceilometer.hardware.pollsters import generic from ceilometer import sample from ceilometer import service from ceilometer.tests import base as test_base class TestMeterDefinition(test_base.BaseTestCase): def test_config_definition(self): cfg = dict(name='test', type='gauge', unit='B', snmp_inspector={}) definition = generic.MeterDefinition(cfg) self.assertEqual('test', definition.name) self.assertEqual('gauge', definition.type) self.assertEqual('B', definition.unit) self.assertEqual({}, definition.snmp_inspector) def test_config_missing_field(self): cfg = dict(name='test', type='gauge') try: generic.MeterDefinition(cfg) except declarative.MeterDefinitionException as e: self.assertEqual("Missing field unit", e.brief_message) def test_config_invalid_field(self): cfg = dict(name='test', type='gauge', unit='B', invalid={}) definition = generic.MeterDefinition(cfg) self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar')) def test_config_invalid_type_field(self): cfg = dict(name='test', type='invalid', unit='B', snmp_inspector={}) try: generic.MeterDefinition(cfg) except declarative.MeterDefinitionException as e: self.assertEqual("Unrecognized type value invalid", e.brief_message) @mock.patch('ceilometer.hardware.pollsters.generic.LOG') def test_bad_metric_skip(self, LOG): cfg = {'metric': [dict(name='test1', type='gauge', unit='B', snmp_inspector={}), dict(name='test_bad', type='invalid', unit='B', snmp_inspector={}), dict(name='test2', type='gauge', unit='B', snmp_inspector={})]} data = generic.load_definition(cfg) self.assertEqual(2, len(data)) LOG.error.assert_called_with( "Error loading meter definition: %s", "Unrecognized type value invalid") class FakeInspector(inspector_base.Inspector): net_metadata = dict(name='test.teest', mac='001122334455', ip='10.0.0.2', speed=1000) DATA = { 'test': (0.99, {}, {}), 'test2': (90, net_metadata, {}), } def inspect_generic(self, host, cache, extra_metadata=None, param=None): yield self.DATA[host.hostname] class TestGenericPollsters(test_base.BaseTestCase): @staticmethod def faux_get_inspector(url, namespace=None): return FakeInspector() def setUp(self): super(TestGenericPollsters, self).setUp() self.conf = service.prepare_service([], []) self.resources = ["snmp://test", "snmp://test2"] self.useFixture(fixtures.MockPatch( 'ceilometer.hardware.inspector.get_inspector', self.faux_get_inspector)) self.pollster = generic.GenericHardwareDeclarativePollster(self.conf) def _setup_meter_def_file(self, cfg): if six.PY3: cfg = cfg.encode('utf-8') meter_cfg_file = fileutils.write_to_tempfile(content=cfg, prefix="snmp", suffix="yaml") self.conf.set_override( 'meter_definitions_file', meter_cfg_file, group='hardware') cfg = declarative.load_definitions( self.conf, {}, self.conf.hardware.meter_definitions_file) return cfg def _check_get_samples(self, name, definition, expected_value, expected_type, expected_unit=None): self.pollster._update_meter_definition(definition) cache = {} samples = list(self.pollster.get_samples(None, cache, self.resources)) self.assertTrue(samples) self.assertIn(self.pollster.CACHE_KEY, cache) for resource in self.resources: self.assertIn(resource, cache[self.pollster.CACHE_KEY]) self.assertEqual(set([name]), set([s.name for s in samples])) match = [s for s in samples if s.name == name] self.assertEqual(expected_value, match[0].volume) 
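# The meter type declared in the definition (and, when given, the unit) must
# also be carried through to the emitted sample.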
self.assertEqual(expected_type, match[0].type) if expected_unit: self.assertEqual(expected_unit, match[0].unit) def test_get_samples(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_def = generic.MeterDefinition(dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param)) self._check_get_samples('hardware.test1', meter_def, 0.99, sample.TYPE_GAUGE, expected_unit='process') def test_get_pollsters_extensions(self): param = dict(matching_type='type_exact', oid='1.3.6.1.4.1.2021.10.1.3.1', type='lambda x: float(str(x))') meter_cfg = yaml.dump( {'metric': [dict(type='gauge', name='hardware.test1', unit='process', snmp_inspector=param), dict(type='gauge', name='hardware.test2.abc', unit='process', snmp_inspector=param)]}) self._setup_meter_def_file(meter_cfg) pollster = generic.GenericHardwareDeclarativePollster # Clear cached mapping pollster.mapping = None exts = pollster.get_pollsters_extensions(self.conf) self.assertEqual(2, len(exts)) self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc']) self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc']) ceilometer-10.0.0/ceilometer/tests/unit/hardware/pollsters/__init__.py0000666000175100017510000000000013236733243026150 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/hardware/inspector/0000775000175100017510000000000013236733440024025 5ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/hardware/inspector/__init__.py0000666000175100017510000000000013236733243026127 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/hardware/inspector/test_snmp.py0000666000175100017510000002267713236733243026434 0ustar zuulzuul00000000000000# # Copyright 2013 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
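# NOTE: for orientation only -- the TestSNMPInspector.mapping entries defined
# below use the already-processed parameter form that inspect_generic() expects:
# a 'matching_type' of snmp.EXACT or snmp.PREFIX, an (oid, converter) tuple under
# 'metric_oid', metadata entries of the same tuple form, and an optional
# 'post_op' hook name. prepare_params() (see test_prepare_params) builds this
# form from the raw YAML-style definition.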
"""Tests for ceilometer/hardware/inspector/snmp/inspector.py """ import fixtures import mock from oslo_utils import netutils from pysnmp.proto.rfc1905 import noSuchObject from ceilometer.hardware.inspector import snmp from ceilometer.tests import base as test_base ins = snmp.SNMPInspector class FakeObjectName(object): def __init__(self, name): self.name = name def __str__(self): return str(self.name) class FakeCommandGenerator(object): def getCmd(self, authData, transportTarget, *oids, **kwargs): emptyOID = '1.3.6.1.4.1.2021.4.14.0' varBinds = [ (FakeObjectName(oid), int(oid.split('.')[-1])) for oid in oids if oid != emptyOID ] if emptyOID in oids: varBinds += [(FakeObjectName(emptyOID), noSuchObject)] return (None, None, 0, varBinds) def bulkCmd(authData, transportTarget, nonRepeaters, maxRepetitions, *oids, **kwargs): varBindTable = [ [(FakeObjectName("%s.%d" % (oid, i)), i) for i in range(1, 3)] for oid in oids ] return (None, None, 0, varBindTable) class TestSNMPInspector(test_base.BaseTestCase): mapping = { 'test_exact': { 'matching_type': snmp.EXACT, 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) }, 'post_op': '_fake_post_op', }, 'test_prefix': { 'matching_type': snmp.PREFIX, 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), 'metadata': { 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) }, 'post_op': None, }, 'test_nosuch': { 'matching_type': snmp.EXACT, 'metric_oid': ('1.3.6.1.4.1.2021.4.14.0', int), 'metadata': {}, 'post_op': None, }, } def setUp(self): super(TestSNMPInspector, self).setUp() self.inspector = snmp.SNMPInspector() self.host = netutils.urlsplit("snmp://localhost") self.useFixture(fixtures.MockPatchObject( snmp.cmdgen, 'CommandGenerator', return_value=FakeCommandGenerator())) def test_snmp_error(self): def get_list(func, *args, **kwargs): return list(func(*args, **kwargs)) def faux_parse(ret, is_bulk): return (True, 'forced error') self.useFixture(fixtures.MockPatchObject( snmp, 'parse_snmp_return', new=faux_parse)) self.assertRaises(snmp.SNMPException, get_list, self.inspector.inspect_generic, host=self.host, cache={}, extra_metadata={}, param=self.mapping['test_exact']) @staticmethod def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): metadata.update(post_op_meta=4) extra.update(project_id=2) return value def test_inspect_no_such_object(self): cache = {} try: # inspect_generic() is a generator, so we explicitly need to # iterate through it in order to trigger the exception. 
list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_nosuch'])) except ValueError: self.fail("got ValueError when interpreting NoSuchObject return") def test_inspect_generic_exact(self): self.inspector._fake_post_op = self._fake_post_op cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_exact'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) self.assertEqual(1, len(ret)) self.assertEqual(1, ret[0][0]) self.assertEqual(8, ret[0][1]['meta']) self.assertEqual(4, ret[0][1]['post_op_meta']) self.assertEqual(2, ret[0][2]['project_id']) def test_inspect_generic_prefix(self): cache = {} ret = list(self.inspector.inspect_generic(self.host, cache, {}, self.mapping['test_prefix'])) keys = cache[ins._CACHE_KEY_OID].keys() self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) self.assertEqual(2, len(ret)) self.assertIn(ret[0][0], (1, 2)) self.assertEqual(ret[0][0], ret[0][1]['meta']) def test_post_op_net(self): cache = {} metadata = dict(name='lo', speed=0, mac='ba21e43302fe') extra = {} ret = self.inspector._post_op_net(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=".2") self.assertEqual(8, ret) self.assertIn('ip', metadata) self.assertIn("2", metadata['ip']) self.assertIn('resource_id', extra) self.assertEqual("localhost.lo", extra['resource_id']) def test_post_op_disk(self): cache = {} metadata = dict(device='/dev/sda1', path='/') extra = {} ret = self.inspector._post_op_disk(self.host, cache, None, value=8, metadata=metadata, extra=extra, suffix=None) self.assertEqual(8, ret) self.assertIn('resource_id', extra) self.assertEqual("localhost./dev/sda1", extra['resource_id']) def test_prepare_params(self): param = {'post_op': '_post_op_disk', 'oid': '1.3.6.1.4.1.2021.9.1.6', 'type': 'int', 'matching_type': 'type_prefix', 'metadata': { 'device': {'oid': '1.3.6.1.4.1.2021.9.1.3', 'type': 'str'}, 'path': {'oid': '1.3.6.1.4.1.2021.9.1.2', 'type': "lambda x: str(x)"}}} processed = self.inspector.prepare_params(param) self.assertEqual('_post_op_disk', processed['post_op']) self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0]) self.assertEqual(int, processed['metric_oid'][1]) self.assertEqual(snmp.PREFIX, processed['matching_type']) self.assertEqual(2, len(processed['metadata'].keys())) self.assertEqual('1.3.6.1.4.1.2021.9.1.2', processed['metadata']['path'][0]) self.assertEqual("4", processed['metadata']['path'][1](4)) def test_pysnmp_ver43(self): # Test pysnmp version >=4.3 compatibility of ObjectIdentifier from distutils import version import pysnmp has43 = (version.StrictVersion(pysnmp.__version__) >= version.StrictVersion('4.3.0')) oid = '1.3.6.4.1.2021.11.57.0' if has43: from pysnmp.entity import engine from pysnmp.smi import rfc1902 from pysnmp.smi import view snmp_engine = engine.SnmpEngine() mvc = view.MibViewController(snmp_engine.getMibBuilder()) name = rfc1902.ObjectIdentity(oid) name.resolveWithMib(mvc) else: from pysnmp.proto import rfc1902 name = rfc1902.ObjectName(oid) self.assertEqual(oid, str(name)) @mock.patch.object(snmp.cmdgen, 'UsmUserData') def test_auth_strategy(self, mock_method): host = ''.join(['snmp://a:b@foo?auth_proto=sha', '&priv_password=pass&priv_proto=aes256']) host = netutils.urlsplit(host) 
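# The URL query parameters above are expected to map onto pysnmp UsmUserData
# arguments: auth_proto=sha -> usmHMACSHAAuthProtocol, priv_proto=aes256 ->
# usmAesCfb256Protocol, with the URL password used as authKey and priv_password
# as privKey (see the assertion below).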
self.inspector._get_auth_strategy(host) mock_method.assert_called_with( 'a', authKey='b', authProtocol=snmp.cmdgen.usmHMACSHAAuthProtocol, privProtocol=snmp.cmdgen.usmAesCfb256Protocol, privKey='pass') host2 = 'snmp://a:b@foo?&priv_password=pass' host2 = netutils.urlsplit(host2) self.inspector._get_auth_strategy(host2) mock_method.assert_called_with('a', authKey='b', privKey='pass') ceilometer-10.0.0/ceilometer/tests/unit/hardware/inspector/test_inspector.py0000666000175100017510000000211613236733243027447 0ustar zuulzuul00000000000000# # Copyright 2014 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import netutils from ceilometer.hardware import inspector from ceilometer.tests import base class TestHardwareInspector(base.BaseTestCase): def test_get_inspector(self): url = netutils.urlsplit("snmp://") driver = inspector.get_inspector(url) self.assertTrue(driver) def test_get_inspector_illegal(self): url = netutils.urlsplit("illegal://") self.assertRaises(RuntimeError, inspector.get_inspector, url) ceilometer-10.0.0/ceilometer/tests/unit/hardware/__init__.py0000666000175100017510000000000013236733243024121 0ustar zuulzuul00000000000000ceilometer-10.0.0/ceilometer/tests/unit/test_decoupled_pipeline.py0000666000175100017510000002603313236733243025473 0ustar zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
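# NOTE: illustrative only -- _setup_pipeline_cfg() below builds the in-memory
# equivalent of a minimal sources/sinks pipeline definition such as:
#
#     sources:
#         - name: test_source
#           meters:
#               - "a"
#           sinks:
#               - test_sink
#     sinks:
#         - name: test_sink
#           transformers:
#               - name: update
#                 parameters: {}
#           publishers:
#               - test://
#
# The names and the test:// publisher are test fixtures, not deployment values.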
import yaml from ceilometer.pipeline import base from ceilometer.pipeline import sample as pipeline from ceilometer import sample from ceilometer.tests.unit import pipeline_base class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): def _setup_pipeline_cfg(self): source = {'name': 'test_source', 'meters': ['a'], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'transformers': [{'name': 'update', 'parameters': {}}], 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) def _break_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'meters': ['b'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def test_source_no_sink(self): del self.pipeline_cfg['sinks'] self._exception_create_pipelinemanager() def test_source_dangling_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self._exception_create_pipelinemanager() def test_sink_no_source(self): del self.pipeline_cfg['sources'] self._exception_create_pipelinemanager() def test_source_with_multiple_sinks(self): meter_cfg = ['a', 'b'] self._set_pipeline_cfg('meters', meter_cfg) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'transformers': [{ 'name': 'update', 'parameters': { 'append_name': '_new', } }], 'publishers': ['new'], }) self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('test_source:second_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher, sfx in [(test_publisher, '_update'), (new_publisher, '_new')]: 
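# Both sinks' publishers should have received both samples, each renamed with
# that sink's own update-transformer suffix ('_update' or '_new').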
self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) def test_multiple_sources_with_single_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['test_sink'] }) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('second_source:test_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] another_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher in [test_publisher, another_publisher]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a_update', getattr(publisher.samples[0], "name")) self.assertEqual('b_update', getattr(publisher.samples[1], "name")) transformed_samples = self.TransformerClass.samples self.assertEqual(2, len(transformed_samples)) self.assertEqual(['a', 'b'], [getattr(s, 'name') for s in transformed_samples]) def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, meters, units): with open('ceilometer/pipeline/data/pipeline.yaml') as fap: data = fap.read() pipeline_cfg = yaml.safe_load(data) for s in pipeline_cfg['sinks']: s['publishers'] = ['test://'] name = self.cfg2file(pipeline_cfg) self.CONF.set_override('pipeline_cfg_file', name) pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[index] self._do_test_rate_of_change_mapping(pipe, meters, units) def test_rate_of_change_boilerplate_disk_read_cfg(self): meters = ('disk.read.bytes', 'disk.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_disk_write_cfg(self): meters = ('disk.write.bytes', 'disk.write.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_network_incoming_cfg(self): meters = ('network.incoming.bytes', 'network.incoming.packets') units = ('B', 'packet') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, meters, units) def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): meters = ('disk.device.read.bytes', 'disk.device.read.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): meters = ('disk.device.write.bytes', 'disk.device.write.requests') units = ('B', 'request') self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, meters, units) def test_rate_of_change_boilerplate_network_outgoing_cfg(self): meters = ('network.outgoing.bytes', 'network.outgoing.packets') units = ('B', 'packet') 
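# The index passed below selects which pipeline built from the bundled
# pipeline.yaml is checked: the network meters are expected under index 4,
# the disk meters under index 3 (see the sibling tests above).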
self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, meters, units) def test_duplicated_sinks_names(self): self.pipeline_cfg['sinks'].append({ 'name': 'test_sink', 'publishers': ['except'], }) self._build_and_set_new_pipeline() self.assertRaises(base.PipelineException, pipeline.SamplePipelineManager, self.CONF) def test_duplicated_source_names(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'meters': ['a'], 'sinks': ['test_sink'] }) self._build_and_set_new_pipeline() self.assertRaises(base.PipelineException, pipeline.SamplePipelineManager, self.CONF) ceilometer-10.0.0/requirements.txt0000666000175100017510000000245513236733243017246 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. cachetools>=1.1.0 # MIT License cotyledon>=1.3.0 #Apache-2.0 futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD futurist>=0.11.0 # Apache-2.0 debtcollector>=1.2.0 # Apache-2.0 jsonpath-rw-ext>=0.1.9 # Apache-2.0 lxml>=2.3 # BSD monotonic msgpack-python>=0.4.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.22.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 pbr>=1.6 # Apache-2.0 oslo.messaging>=5.12.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 pysnmp<5.0.0,>=4.2.3 # BSD python-glanceclient>=2.0.0 # Apache-2.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 python-neutronclient>=4.2.0 # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-swiftclient>=2.2.0 # Apache-2.0 python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1 # Apache-2.0 PyYAML>=3.1.0 # MIT requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT stevedore>=1.9.0 # Apache-2.0 tenacity>=3.2.1 # Apache-2.0 tooz[zake]>=1.47.0 # Apache-2.0 os-xenapi>=0.1.1 # Apache-2.0 ceilometer-10.0.0/PKG-INFO0000664000175100017510000000460513236733440015053 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: ceilometer Version: 10.0.0 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ========== Ceilometer ========== -------- Overview -------- Ceilometer is a data collection service that collects event and metering data by monitoring notifications sent from OpenStack services. It publishes collected data to various targets including data stores and message queues. Ceilometer is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. ------------- Documentation ------------- Release notes are available at https://releases.openstack.org/teams/telemetry.html Developer documentation is available at https://docs.openstack.org/ceilometer/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/ceilometer Code Repository --------------- - Server: https://github.com/openstack/ceilometer Bug Tracking ------------ - Bugs: https://bugs.launchpad.net/ceilometer IRC --- IRC Channel: #openstack-telemetry on `Freenode`_. Mailinglist ----------- Project use http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev as the mailinglist. Please use tag ``[Ceilometer]`` in the subject for new threads. .. 
_Freenode: https://freenode.net/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Topic :: System :: Monitoring ceilometer-10.0.0/HACKING.rst0000666000175100017510000000205713236733243015556 0ustar zuulzuul00000000000000Ceilometer Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Ceilometer Specific Commandments -------------------------------- - [C301] LOG.warn() is not allowed. Use LOG.warning() - [C302] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. ceilometer-10.0.0/.coveragerc0000666000175100017510000000014213236733243016072 0ustar zuulzuul00000000000000[run] branch = True source = ceilometer omit = ceilometer/tests/* [report] ignore_errors = True ceilometer-10.0.0/babel.cfg0000666000175100017510000000002113236733243015473 0ustar zuulzuul00000000000000[python: **.py] ceilometer-10.0.0/AUTHORS0000664000175100017510000004045013236733435015030 0ustar zuulzuul000000000000001iuwei <1iu.wei81@zte.com.cn> Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alex Holden Alexander Chadin Alexei Kornienko Alfredo Moralejo Amy Fong Ana Malagon Ananya Chatterjee Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova Artur Svechnikov Ashwin Agate Balazs Gibizer Bartosz GĂłrski Ben Nemec Ben Nemec Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brian Moss Brooklyn Chen BĂ©la Vancsics Can ZHANG Cao Xuan Hoang Cedric Soulas Chad Lung Chandan Kumar Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen Charles Bitter ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Christian Berendt Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu Cyril Roelandt Cyril Roelandt Dai Dang Van Damian Van Vuuren Dan Florea Dan Prince Dan Travis Danek Duvall Daniel Russell Dao Cong Tien Darren Birkett Darren Hague Davanum Srinivas David Peraza David Rabel Dazhao Debo~ Dutta Deepthi V V Dina Belova Dirk Mueller Divya Dong Ma Doug Hellmann Drew Thorstensen Edwin Zhai Emilien Macchi Emma Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Berglund Eric Brown Eyal Fabio Giannetti Fei Long Wang Feilong Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier 
François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Gordon Chung Graham Binns Graham Hayes Guangyu Suo Gyorgy Szombathelyi Ha Van Tu Hang Liu Hangdong Zhang Hanxi Hanxi Liu Hanxi_Liu Haomeng, Wang Harri Hämäläinen Hisashi Osanai Hoang Trung Hieu Hongbin Lu Huachao Mao Huan Xie Huang Rui Ianeta Hutchinson Igor Degtiarov Ihar Hrachyshka Ildar Svetlov Ildiko Vancsa Ilya Sviridov Ilya Tyaptin IonuČ› ArČ›ÄriČ™i Jake Liu James E. Blair James E. Blair James Page Jason Myers Jason Zhang Jay Lau Jay Pipes Jens Rosenboom Jeremy Stanley Ji-Wei Jiang Qin Jianghua Wang Jie Li Jim Rollenhagen Jimmy McCrory Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon Jonte Watford JordanP Joseph Richard Joshua Harlow JuPing Juan Antonio Osorio Robles Julien Danjou June.King Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kennan Kennan Kevin McDonald Kevin_Zheng Kirill Bespalov Kishore Juigil Kobi Samoray Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Laszlo Hegedus Lena Novokshonova Lianhao Lu Lingxian Kong LinuxJedi LiuSheng Luis A. Garcia Luis Pigueiras Luo Gangyi Luong Anh Tuan Maho Koshiya Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Mathew Odden Mathieu GagneĚ Matt Riedemann Matt Wisch Maxime Guyot Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still MichaĹ‚ JastrzÄ™bski Miguel Alex Cantu Miguel Grinberg Mike Spreitzer Milan Potdar Ming Shuang Xian Monsyne Dragon Monty Taylor Morgan Fainberg Nadya Privalova Nadya Shakhat Nam Nguyen Hoai Nejc Saje Ngo Quoc Cuong Nguyen Phuong An Nguyen Van Trung Nick Barcet Nicolas Barcet (nijaba) Nishant Kumar Noorul Islam K M Octavian Ciuhandu OpenStack Release Bot Pablo Iranzo GĂłmez PanFengyun PanFengyun Patrick East Paul Belanger Paul Bourke Peter Nordquist Peter Portante Petr Kovar Petr KubÄ›na Phil Neal Pierre Riteau Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Prudhvi Rao Shedimbi Pádraig Brady Qiaowei Ren Rabi Mishra Rafael Rivero Rafal Szmigiel Rich Bowen Rikimaru Honjo Rob Raymond Robert Collins Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Roman Podoliaka Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU SU, HAO-CHEN Saba Ahmed Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sergey Lukjanov Sergey Vilgelm Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shubham Chitranshi Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stefano Zilli Stephen Balukoff Stephen Gran Steve Lewis Steve Martinelli Steven Berler Sumant Murke Sumit Jamgade SunAnChen Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Takashi NATSUME Tatsuro Makita Terri Yu Thierry Carrez Thomas Bechtold Thomas Graichen Thomas Herve Thomas Herve Thomas Maddox Tin Lam Tong Li Tony Breeds Ubuntu Victor Stinner Victor Stinner Victoria Martinez de la Cruz Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Vu Cong Tuan WenyanZhang Wenzhi Yu Wu Wenxiang Xia Linjuan XiaBing Yao Xiang Li XieYingYun Yaguang Tang Yaguang Tang Yanyan Hu Yarko Tymciurak Yassine Lamgarchal Yathiraj Udupi You Yamagata Yuanbin.Chen Yunhong, Jiang Yurii Prokulevych Yuriy Zveryanskyy Yushiro FURUKAWA ZTE-SuZhengwei ZhaoBo Zhengwei Gao Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhongyue Luo Zi Lian Ji Zuul aggaatul alextricity25 ananya23d annegentle ansaba blue55 caoyuan cbitte000 ccrouch celik.esra chen-xing chenaidong1 
chenxing daz dongwenjuan dsxyy eNovance emilienm fengchaoyang florent fujioka yuuichi gaofei gengchc2 gengjh ghanshyam ghanshyam gong yong sheng gord chung guillaume pernot hanxi.liu hgangwx jiaxi jimmygc jing.liuqing jinxingfang jizilian jonnary joyce kairoaraujo khushbuparakh kiwik-chenrui leizhang lianghuifei lijian lipan liuqing liusheng liuwei liyuenan lizheming lqslan lrqrun ls1175 lvdongbing lzhijun melissaml mizeng nellysmitt nicodemus npraveen35 obutenko pangliye qin.jiang replay sanuptpm sh.huang shangxiaobj shengjie min sin srsakhamuri tanlin terriyu unknown vagrant venkatamahesh vivek.nandavanam vivek.nandavanam wbluo0907 xialinjuan xianbin xiangjun li xiangjun.li xiaozhuangqing xiexianbin xingzhou xugang xuqiankun yanghuichan yanheven yuyafei zhang-jinnan zhang-shaoman zhang.lei zhangdaolong zhangguoqing zhangshengping2012 zhangxuanyuan zhangyangyang zhangyanxian zhaolihui zhufl zjingbj ceilometer-10.0.0/doc/0000775000175100017510000000000013236733440014516 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/0000775000175100017510000000000013236733440016016 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/glossary.rst0000666000175100017510000001120513236733243020415 0ustar zuulzuul00000000000000.. Copyright 2012 New Dream Network (DreamHost) Copyright 2013 eNovance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========== Glossary ========== .. glossary:: agent Software service running on the OpenStack infrastructure measuring usage and sending the results to any number of target using the :term:`publisher`. billing Billing is the process to assemble bill line items into a single per customer bill, emitting the bill to start the payment collection. bus listener agent Bus listener agent which takes events generated on the Oslo notification bus and transforms them into Ceilometer samples. This is the preferred method of data collection. ceilometer From Wikipedia [#]_: A ceilometer is a device that uses a laser or other light source to determine the height of a cloud base. polling agent Software service running either on a central management node within the OpenStack infrastructure or compute node measuring usage and sending the results to a queue. notification agent The different OpenStack services emit several notifications about the various types of events. The notification agent consumes them from respective queues and filters them by the event_type. data store Storage system for recording data collected by ceilometer. meter The measurements tracked for a resource. For example, an instance has a number of meters, such as duration of instance, CPU time used, number of disk io requests, etc. Three types of meters are defined in ceilometer: * Cumulative: Increasing over time (e.g. disk I/O) * Gauge: Discrete items (e.g. floating IPs, image uploads) and fluctuating values (e.g. number of Swift objects) * Delta: Incremental change to a counter over time (e.g. 
bandwidth delta) metering Metering is the process of collecting information about what, who, when and how much regarding anything that can be billed. The result of this is a collection of "tickets" (a.k.a. samples) which are ready to be processed in any way you want. notification A message sent via an external OpenStack system (e.g Nova, Glance, etc) using the Oslo notification mechanism [#]_. These notifications are usually sent to and received by Ceilometer through the notifier RPC driver. non-repudiable From Wikipedia [#]_: Non-repudiation refers to a state of affairs where the purported maker of a statement will not be able to successfully challenge the validity of the statement or contract. The term is often seen in a legal setting wherein the authenticity of a signature is being challenged. In such an instance, the authenticity is being "repudiated". project The OpenStack tenant or project. polling agents The polling agent is collecting measurements by polling some API or other tool at a regular interval. publisher The publisher is publishing samples to a specific target. push agents The push agent is the only solution to fetch data within projects, which do not expose the required data in a remotely usable way. This is not the preferred method as it makes deployment a bit more complex having to add a component to each of the nodes that need to be monitored. rating Rating is the process of analysing a series of tickets, according to business rules defined by marketing, in order to transform them into bill line items with a currency value. resource The OpenStack entity being metered (e.g. instance, volume, image, etc). sample Data sample for a particular meter. source The origin of metering data. This field is set to "openstack" by default. It can be configured to a different value using the sample_source field in the ceilometer.conf file. user An OpenStack user. .. [#] http://en.wikipedia.org/wiki/Ceilometer .. [#] https://git.openstack.org/cgit/openstack/oslo.messaging/tree/oslo_messaging/notify/notifier.py .. [#] http://en.wikipedia.org/wiki/Non-repudiation ceilometer-10.0.0/doc/source/conf.py0000666000175100017510000002276313236733243017332 0ustar zuulzuul00000000000000# # Ceilometer documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os import warnings BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # This is required for ReadTheDocs.org, but isn't a bad idea anyway. os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
# They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'openstackdocstheme', 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'oslo_config.sphinxconfiggen', ] config_generator_config_file = os.path.join(ROOT, 'etc/ceilometer/ceilometer-config-generator.conf') sample_config_basename = '_static/ceilometer' todo_include_todos = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer' copyright = u'2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' html_theme = 'openstackdocs' # openstackdocstheme options repository_name = 'openstack/ceilometer' bug_project = 'ceilometer' bug_tag = '' # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Ceilometerdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Ceilometer.tex', u'Ceilometer Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometer', u'Ceilometer Documentation', [u'OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Ceilometer', u'Ceilometer Documentation', u'OpenStack', 'Ceilometer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Ceilometer' epub_author = u'OpenStack' epub_publisher = u'OpenStack' epub_copyright = u'2012-2015, OpenStack' # The language of the text. 
It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # NOTE(dhellmann): pbr used to set this option but now that we are # using Sphinx>=1.6.2 it does not so we have to set it ourselves. suppress_warnings = [ 'app.add_directive', 'app.add_role', 'app.add_generic_role', 'app.add_node', 'image.nonlocal_uri', ] ceilometer-10.0.0/doc/source/configuration/0000775000175100017510000000000013236733440020665 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/configuration/index.rst0000666000175100017510000000155113236733243022533 0ustar zuulzuul00000000000000.. _configuring: ================================ Ceilometer Configuration Options ================================ Ceilometer Sample Configuration File ==================================== Configure Ceilometer by editing /etc/ceilometer/ceilometer.conf. No config file is provided with the source code, it will be created during the installation. In case where no configuration file was installed, one can be easily created by running:: oslo-config-generator \ --config-file=/etc/ceilometer/ceilometer-config-generator.conf \ --output-file=/etc/ceilometer/ceilometer.conf The following is a sample Ceilometer configuration for adaptation and use. It is auto-generated from Ceilometer when this documentation is built, and can also be viewed in `file form <_static/ceilometer.conf.sample>`_. .. literalinclude:: ../_static/ceilometer.conf.sample ceilometer-10.0.0/doc/source/releasenotes/0000775000175100017510000000000013236733440020507 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/releasenotes/folsom.rst0000666000175100017510000000504213236733243022544 0ustar zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _folsom: ==================== Folsom ==================== This is the first release (Version 0.1) of Ceilometer. Please take all appropriate caution in using it, as it is a technology preview at this time. Version of OpenStack It is currently tested to work with OpenStack 2012.2 Folsom. 
Due to its use of openstack-common, and the modification that were made in term of notification to many other components (glance, cinder, quantum), it will not easily work with any prior version of OpenStack. Components Currently covered components are: Nova, Nova-network, Glance, Cinder and Quantum. Notably, there is no support yet for Swift and it was decided not to support nova-volume in favor of Cinder. A detailed list of meters covered per component can be found at in :ref:`measurements`. Nova with libvirt only Most of the Nova meters will only work with libvirt fronted hypervisors at the moment, and our test coverage was mostly done on KVM. Contributors are welcome to implement other virtualization backends' meters. Quantum delete events Quantum delete notifications do not include the same metadata as the other messages, so we ignore them for now. This isn't ideal, since it may mean we miss charging for some amount of time, but it is better than throwing away the existing metadata for a resource when it is deleted. Database backend The only tested and complete database backend is currently MongoDB, the SQLAlchemy one is still work in progress. Installation The current best source of information on how to deploy this project is found as the devstack implementation but feel free to come to #openstack-metering on freenode for more info. Volume of data Please note that metering can generate lots of data very quickly. Have a look at the following spreadsheet to evaluate what you will end up with. https://wiki.openstack.org/wiki/EfficientMetering#Volume_of_data ceilometer-10.0.0/doc/source/releasenotes/index.rst0000666000175100017510000000274113236733243022357 0ustar zuulzuul00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Release Notes ============================ .. toctree:: :hidden: folsom * :ref:`folsom` * `Havana`_ * `Icehouse`_ * `Juno`_ * `Kilo`_ * `Liberty`_ Since Mitaka development cycle, we start to host release notes on `Ceilometer Release Notes`_ .. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29 .. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29 .. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29 .. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29 .. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 .. _Ceilometer Release Notes: https://docs.openstack.org/releasenotes/ceilometer/ ceilometer-10.0.0/doc/source/install/0000775000175100017510000000000013236733440017464 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/install-gnocchi.inc0000666000175100017510000000445113236733243023244 0ustar zuulzuul000000000000002. Create the database for Gnocchi's indexer: * Use the database access client to connect to the database server as the ``root`` user: .. 
code-block:: console $ mysql -u root -p * Create the ``gnocchi`` database: .. code-block:: console CREATE DATABASE gnocchi; * Grant proper access to the ``gnocchi`` database: .. code-block:: console GRANT ALL PRIVILEGES ON gnocchi.* TO 'gnocchi'@'localhost' \ IDENTIFIED BY 'GNOCCHI_DBPASS'; GRANT ALL PRIVILEGES ON gnocchi.* TO 'gnocchi'@'%' \ IDENTIFIED BY 'GNOCCHI_DBPASS'; Replace ``GNOCCHI_DBPASS`` with a suitable password. * Exit the database access client. 3. Edit the ``/etc/gnocchi/gnocchi.conf`` file and add Keystone options: * In the ``[api]`` section, configure gnocchi to use keystone: .. code-block:: ini [api] auth_mode = keystone * In the ``[keystone_authtoken]`` section, configure keystone authentication: .. code-block:: ini [keystone_authtoken] ... auth_type = password auth_url = http://controller:5000/v3 project_domain_name = Default user_domain_name = Default project_name = service username = gnocchi password = GNOCCHI_PASS interface = internalURL region_name = RegionOne Replace ``GNOCCHI_PASS`` with the password you chose for the ``gnocchi`` user in the Identity service. * In the ``[indexer]`` section, configure database access: .. code-block:: ini [indexer] url = mysql+pymysql://gnocchi:GNOCCHI_DBPASS@controller/gnocchi Replace ``GNOCCHI_DBPASS`` with the password you chose for Gnocchi's indexer database. * In the ``[storage]`` section, configure location to store metric data. In this case, we will store it to the local file system. See Gnocchi documenation for a list of more durable and performant drivers: .. code-block:: ini [storage] # coordination_url is not required but specifying one will improve # performance with better workload division across workers. coordination_url = redis://controller:6379 file_basepath = /var/lib/gnocchi driver = file 4. Initialize Gnocchi: .. code-block:: console gnocchi-upgrade ceilometer-10.0.0/doc/source/install/next-steps.rst0000666000175100017510000000035213236733243022333 0ustar zuulzuul00000000000000.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the ceilometer service. To add additional services, see the `OpenStack Installation Tutorials and Guides `_. ceilometer-10.0.0/doc/source/install/glance/0000775000175100017510000000000013236733440020715 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/glance/install-glance-rdo.rst0000666000175100017510000000211513236733243025130 0ustar zuulzuul00000000000000Enable Image service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Image service meters. Perform these steps on the controller node. Configure the Image service to use Telemetry -------------------------------------------- * Edit the ``/etc/glance/glance-api.conf`` and ``/etc/glance/glance-registry.conf`` files and complete the following actions: * In the ``[DEFAULT]``, ``[oslo_messaging_notifications]`` sections, configure notifications and RabbitMQ message broker access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller [oslo_messaging_notifications] ... driver = messagingv2 Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. Finalize installation --------------------- * Restart the Image service: .. 
code-block:: console # systemctl restart openstack-glance-api.service openstack-glance-registry.service ceilometer-10.0.0/doc/source/install/glance/install-glance-obs.rst0000666000175100017510000000211313236733243025125 0ustar zuulzuul00000000000000Enable Image service meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Image service meters. Perform these steps on the controller node. Configure the Image service to use Telemetry -------------------------------------------- * Edit the ``/etc/glance/glance-api.conf`` and ``/etc/glance/glance-registry.conf`` files and complete the following actions: * In the ``[DEFAULT]``, ``[oslo_messaging_notifications]`` sections, configure notifications and RabbitMQ message broker access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller [oslo_messaging_notifications] ... driver = messagingv2 Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. Finalize installation --------------------- * Restart the Image service: .. code-block:: console # systemctl restart openstack-glance-api.service openstack-glance-registry.service ceilometer-10.0.0/doc/source/install/glance/install-glance-ubuntu.rst0000666000175100017510000000200413236733243025663 0ustar zuulzuul00000000000000Enable Image service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Image service meters. Perform these steps on the controller node. Configure the Image service to use Telemetry -------------------------------------------- * Edit the ``/etc/glance/glance-api.conf`` and ``/etc/glance/glance-registry.conf`` files and complete the following actions: * In the ``[DEFAULT]``, ``[oslo_messaging_notifications]`` sections, configure notifications and RabbitMQ message broker access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller [oslo_messaging_notifications] ... driver = messagingv2 Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. Finalize installation --------------------- * Restart the Image service: .. code-block:: console # service glance-registry restart # service glance-api restart ceilometer-10.0.0/doc/source/install/verify.rst0000666000175100017510000001116513236733243021531 0ustar zuulzuul00000000000000.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Telemetry service. These steps only include the Image service meters to reduce clutter. Environments with ceilometer integration for additional services contain more meters. .. note:: Perform these steps on the controller node. .. note:: The following uses Gnocchi to verify data. Alternatively, data can be published to a file backend temporarily by adding ``meter_dispatchers=file`` or a ``file://`` publisher. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. List available resource and its metrics: .. 
code-block:: console $ gnocchi resource list --type image +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ | id | type | project_id | user_id | original_resource_id | started_at | ended_at | revision_start | revision_end | +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ | a6b387e1-4276-43db-b17a-e10f649d85a3 | image | 6fd9631226e34531b53814a0f39830a9 | None | a6b387e1-4276-43db-b17a-e10f649d85a3 | 2017-01-25T23:50:14.423584+00:00 | None | 2017-01-25T23:50:14.423601+00:00 | None | +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ $ gnocchi resource show a6b387e1-4276-43db-b17a-e10f649d85a3 +-----------------------+-------------------------------------------------------------------+ | Field | Value | +-----------------------+-------------------------------------------------------------------+ | created_by_project_id | aca4db3db9904ecc9c1c9bb1763da6a8 | | created_by_user_id | 07b0945689a4407dbd1ea72c3c5b8d2f | | creator | 07b0945689a4407dbd1ea72c3c5b8d2f:aca4db3db9904ecc9c1c9bb1763da6a8 | | ended_at | None | | id | a6b387e1-4276-43db-b17a-e10f649d85a3 | | metrics | image.download: 839afa02-1668-4922-a33e-6b6ea7780715 | | | image.serve: 1132e4a0-9e35-4542-a6ad-d6dc5fb4b835 | | | image.size: 8ecf6c17-98fd-446c-8018-b741dc089a76 | | original_resource_id | a6b387e1-4276-43db-b17a-e10f649d85a3 | | project_id | 6fd9631226e34531b53814a0f39830a9 | | revision_end | None | | revision_start | 2017-01-25T23:50:14.423601+00:00 | | started_at | 2017-01-25T23:50:14.423584+00:00 | | type | image | | user_id | None | +-----------------------+-------------------------------------------------------------------+ #. Download the CirrOS image from the Image service: .. code-block:: console $ IMAGE_ID=$(glance image-list | grep 'cirros' | awk '{ print $2 }') $ glance image-download $IMAGE_ID > /tmp/cirros.img #. List available meters again to validate detection of the image download: .. code-block:: console $ gnocchi measures show 839afa02-1668-4922-a33e-6b6ea7780715 +---------------------------+-------------+-----------+ | timestamp | granularity | value | +---------------------------+-------------+-----------+ | 2017-01-26T15:35:00+00:00 | 300.0 | 3740163.0 | +---------------------------+-------------+-----------+ #. Remove the previously downloaded image file ``/tmp/cirros.img``: .. code-block:: console $ rm /tmp/cirros.img ceilometer-10.0.0/doc/source/install/install-controller.rst0000666000175100017510000000317113236733243024052 0ustar zuulzuul00000000000000.. _install_controller: Install and Configure Controller Services ========================================= This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. Note that installation and configuration vary by distribution. Ceilometer ---------- .. 
toctree:: :maxdepth: 1 install-base-obs.rst install-base-rdo.rst install-base-ubuntu.rst Additional steps are required to configure services to interact with ceilometer: Cinder ------ .. toctree:: :maxdepth: 1 cinder/install-cinder-obs.rst cinder/install-cinder-rdo.rst cinder/install-cinder-ubuntu.rst Glance ------ .. toctree:: :maxdepth: 1 glance/install-glance-obs.rst glance/install-glance-rdo.rst glance/install-glance-ubuntu.rst Heat ---- .. toctree:: :maxdepth: 1 heat/install-heat-obs.rst heat/install-heat-rdo.rst heat/install-heat-ubuntu.rst Keystone -------- To enable auditing of API requests, Keystone provides middleware which captures API requests to a service and emits data to Ceilometer. Instructions to enable this functionality are available in `Keystone's developer documentation `_. Ceilometer will capture this information as ``audit.http.*`` events. Neutron ------- .. toctree:: :maxdepth: 1 neutron/install-neutron-obs.rst neutron/install-neutron-rdo.rst neutron/install-neutron-ubuntu.rst Swift ----- .. toctree:: :maxdepth: 1 swift/install-swift-obs.rst swift/install-swift-rdo.rst swift/install-swift-ubuntu.rst ceilometer-10.0.0/doc/source/install/install-compute-rdo.rst0000666000175100017510000000154213236733243024125 0ustar zuulzuul00000000000000Enable Compute service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of notifications and an agent to collect Compute meters. Perform these steps on each compute node. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # yum install openstack-ceilometer-compute .. include:: install-compute-common.inc Finalize installation --------------------- #. Start the agent and configure it to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-compute.service # systemctl start openstack-ceilometer-compute.service #. Restart the Compute service: .. code-block:: console # systemctl restart openstack-nova-compute.service ceilometer-10.0.0/doc/source/install/install-base-ubuntu.rst0000666000175100017510000000300413236733243024114 0ustar zuulzuul00000000000000.. _install_ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry service, code-named ceilometer, on the controller node. Prerequisites ------------- Before you install and configure the Telemetry service, you must configure a target to send metering data to. The recommended endpoint is Gnocchi_. .. _Gnocchi: http://gnocchi.xyz .. include:: install-base-prereq-common.inc Install Gnocchi --------------- #. Install the Gnocchi packages. Alternatively, Gnocchi can be installed using pip: .. code-block:: console # apt-get install gnocchi-api gnocchi-metricd python-gnocchiclient .. note:: Depending on your environment size, consider installing Gnocchi separately as it makes extensive use of the CPU. .. include:: install-gnocchi.inc Finalize Gnocchi installation ----------------------------- #. Restart the Gnocchi services: .. code-block:: console # service gnocchi-api restart # service gnocchi-metricd restart Install and configure components -------------------------------- #. Install the ceilometer packages: .. code-block:: console # apt-get install ceilometer-agent-notification \ ceilometer-agent-central .. include:: install-base-config-common.inc Finalize installation --------------------- #.
Restart the Telemetry services: .. code-block:: console # service ceilometer-agent-central restart # service ceilometer-agent-notification restart ceilometer-10.0.0/doc/source/install/install-compute-ubuntu.rst0000666000175100017510000000123413236733243024661 0ustar zuulzuul00000000000000Enable Compute service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of notifications and an agent to collect Compute meters. Perform these steps on each compute node. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install ceilometer-agent-compute .. include:: install-compute-common.inc Finalize installation --------------------- #. Restart the agent: .. code-block:: console # service ceilometer-agent-compute restart #. Restart the Compute service: .. code-block:: console # service nova-compute restart ceilometer-10.0.0/doc/source/install/neutron/0000775000175100017510000000000013236733440021156 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/neutron/install-neutron-rdo.rst0000666000175100017510000000141213236733243025631 0ustar zuulzuul00000000000000Enable Networking service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Networking service meters. Perform these steps on the controller node. Configure the Networking service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Networking service: .. code-block:: console # systemctl restart neutron-server.service ceilometer-10.0.0/doc/source/install/neutron/install-neutron-ubuntu.rst0000666000175100017510000000130613236733243026371 0ustar zuulzuul00000000000000Enable Networking service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Networking service meters. Perform these steps on the controller node. Configure the Networking service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Networking service: .. code-block:: console # service neutron-server restart ceilometer-10.0.0/doc/source/install/neutron/install-neutron-obs.rst0000666000175100017510000000141013236733243025626 0ustar zuulzuul00000000000000Enable Networking service meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Networking service meters. Perform these steps on the controller node. Configure the Networking service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Networking service: .. 
code-block:: console # systemctl restart neutron-server.service ceilometer-10.0.0/doc/source/install/install-compute-common.inc0000666000175100017510000000256513236733243024600 0ustar zuulzuul000000000000002. Edit the ``/etc/ceilometer/ceilometer.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_credentials]`` section, configure service credentials: .. code-block:: ini [service_credentials] ... auth_url = http://controller:5000 project_domain_id = default user_domain_id = default auth_type = password username = ceilometer project_name = service password = CEILOMETER_PASS interface = internalURL region_name = RegionOne Replace ``CEILOMETER_PASS`` with the password you chose for the ``ceilometer`` user in the Identity service. Configure Compute to use Telemetry ---------------------------------- * Edit the ``/etc/nova/nova.conf`` file and configure notifications in the ``[DEFAULT]`` section: .. code-block:: ini [DEFAULT] ... instance_usage_audit = True instance_usage_audit_period = hour notify_on_state_change = vm_and_task_state [oslo_messaging_notifications] ... driver = messagingv2 ceilometer-10.0.0/doc/source/install/install-base-config-common.inc0000666000175100017510000000276213236733243025300 0ustar zuulzuul000000000000002. Edit the ``/etc/ceilometer/pipeline.yaml`` file and complete the following section: * Configure Gnocchi connection: .. code-block:: yaml publishers: # set address of Gnocchi # + filter out Gnocchi-related activity meters (Swift driver) # + set default archive policy - gnocchi://?filter_project=service&archive_policy=low 3. Edit the ``/etc/ceilometer/ceilometer.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_credentials]`` section, configure service credentials: .. code-block:: ini [service_credentials] ... auth_type = password auth_url = http://controller:5000/v3 project_domain_id = default user_domain_id = default project_name = service username = ceilometer password = CEILOMETER_PASS interface = internalURL region_name = RegionOne Replace ``CEILOMETER_PASS`` with the password you chose for the ``ceilometer`` user in the Identity service. 4. Create Ceilometer resources in Gnocchi. Gnocchi should be running by this stage: .. code-block:: console # ceilometer-upgrade ceilometer-10.0.0/doc/source/install/install-base-obs.rst0000666000175100017510000000372113236733243023363 0ustar zuulzuul00000000000000.. _install_obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry service, code-named ceilometer, on the controller node. Prerequisites ------------- Before you install and configure the Telemetry service, you must configure a target to send metering data to. The recommended endpoint is Gnocchi_. .. _Gnocchi: http://gnocchi.xyz .. include:: install-base-prereq-common.inc Install Gnocchi --------------- #. Install the Gnocchi packages. 
Alternatively, Gnocchi can be install using pip: .. code-block:: console # zypper install openstack-gnocchi-api openstack-gnocchi-metricd \ python-gnocchiclient .. note:: Depending on your environment size, consider installing Gnocchi separately as it makes extensive use of the cpu. .. include:: install-gnocchi.inc Finalize Gnocchi installation ----------------------------- #. Start the Gnocchi services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service # systemctl start openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper install openstack-ceilometer-agent-notification \ openstack-ceilometer-agent-central .. include:: install-base-config-common.inc Finalize installation --------------------- #. Start the Telemetry services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-agent-notification.service \ openstack-ceilometer-agent-central.service # systemctl start openstack-ceilometer-agent-notification.service \ openstack-ceilometer-agent-central.service ceilometer-10.0.0/doc/source/install/get_started.rst0000666000175100017510000000343713236733243022535 0ustar zuulzuul00000000000000========================================== Telemetry Data Collection service overview ========================================== The Telemetry Data Collection services provide the following functions: * Efficiently polls metering data related to OpenStack services. * Collects event and metering data by monitoring notifications sent from services. * Publishes collected data to various targets including data stores and message queues. The Telemetry service consists of the following components: A compute agent (``ceilometer-agent-compute``) Runs on each compute node and polls for resource utilization statistics. This is actually the polling agent ``ceilometer-polling`` running with parameter ``--polling-namespace compute``. A central agent (``ceilometer-agent-central``) Runs on a central management server to poll for resource utilization statistics for resources not tied to instances or compute nodes. Multiple agents can be started to scale service horizontally. This is actually the polling agent ``ceilometer-polling`` running with parameter ``--polling-namespace central``. A notification agent (``ceilometer-agent-notification``) Runs on a central management server(s) and consumes messages from the message queue(s) to build event and metering data. Data is then published to defined targets. By default, data is pushed to Gnocchi_. These services communicate by using the OpenStack messaging bus. Ceilometer data is designed to be published to various endpoints for storage and analysis. .. note:: Ceilometer previously provided a storage and API solution. As of Newton, this functionality is officially deprecated and discouraged. For efficient storage and statistical analysis of Ceilometer data, Gnocchi_ is recommended. .. _Gnocchi: http://gnocchi.xyz ceilometer-10.0.0/doc/source/install/install-base-rdo.rst0000666000175100017510000000366413236733243023372 0ustar zuulzuul00000000000000.. 
_install_rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry service, code-named ceilometer, on the controller node. Prerequisites ------------- Before you install and configure the Telemetry service, you must configure a target to send metering data to. The recommended endpoint is Gnocchi_. .. _Gnocchi: http://gnocchi.xyz .. include:: install-base-prereq-common.inc Install Gnocchi --------------- #. Install the Gnocchi packages. Alternatively, Gnocchi can be install using pip: .. code-block:: console # yum install openstack-gnocchi-api openstack-gnocchi-metricd \ python-gnocchiclient .. note:: Depending on your environment size, consider installing Gnocchi separately as it makes extensive use of the cpu. .. include:: install-gnocchi.inc Finalize Gnocchi installation ----------------------------- #. Start the Gnocchi services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service # systemctl start openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service Install and configure components -------------------------------- #. Install the Ceilometer packages: .. code-block:: console # yum install openstack-ceilometer-notification \ openstack-ceilometer-central .. include:: install-base-config-common.inc Finalize installation --------------------- #. Start the Telemetry services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-notification.service \ openstack-ceilometer-central.service # systemctl start openstack-ceilometer-notification.service \ openstack-ceilometer-central.service ceilometer-10.0.0/doc/source/install/swift/0000775000175100017510000000000013236733440020620 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/swift/install-swift-config-common.inc0000666000175100017510000000241613236733243026652 0ustar zuulzuul00000000000000Configure Object Storage to use Telemetry ----------------------------------------- Perform these steps on the controller and any other nodes that run the Object Storage proxy service. * Edit the ``/etc/swift/proxy-server.conf`` file and complete the following actions: * In the ``[filter:keystoneauth]`` section, add the ``ResellerAdmin`` role: .. code-block:: ini [filter:keystoneauth] ... operator_roles = admin, user, ResellerAdmin * In the ``[pipeline:main]`` section, add ``ceilometer``: .. code-block:: ini [pipeline:main] pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging ceilometer proxy-server * In the ``[filter:ceilometer]`` section, configure notifications: .. code-block:: ini [filter:ceilometer] paste.filter_factory = ceilometermiddleware.swift:filter_factory ... control_exchange = swift url = rabbit://openstack:RABBIT_PASS@controller:5672/ driver = messagingv2 topic = notifications log_level = WARN Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. 
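Once the proxy service has been restarted, you can optionally verify that the middleware is emitting request-driven meters such as ``storage.objects.outgoing.bytes``. The commands below are only a sketch: they assume the default Gnocchi publisher is enabled and that Ceilometer's stock resource definitions, which map Object Storage meters to the ``swift_account`` resource type, have not been customized.

.. code-block:: console

   $ . admin-openrc
   $ openstack container create telemetry-verify
   $ openstack object create telemetry-verify /etc/hostname
   $ gnocchi resource list --type swift_account

If no ``swift_account`` resources appear after a short delay, check the proxy logs for errors loading the ``ceilometer`` filter.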
ceilometer-10.0.0/doc/source/install/swift/install-swift-obs.rst0000666000175100017510000000130513236733243024735 0ustar zuulzuul00000000000000Enable Object Storage meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of polling and notifications to collect Object Storage meters. .. note:: Your environment must include the Object Storage service. .. include:: install-swift-prereq-common.inc Install components ------------------ * Install the packages: .. code-block:: console # zypper install python-ceilometermiddleware .. include:: install-swift-config-common.inc Finalize installation --------------------- * Restart the Object Storage proxy service: .. code-block:: console # systemctl restart openstack-swift-proxy.service ceilometer-10.0.0/doc/source/install/swift/install-swift-ubuntu.rst0000666000175100017510000000117213236733243025476 0ustar zuulzuul00000000000000Enable Object Storage meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of polling and notifications to collect Object Storage meters. .. note:: Your environment must include the Object Storage service. .. include:: install-swift-prereq-common.inc Install components ------------------ * Install the packages: .. code-block:: console # apt-get install python-ceilometermiddleware .. include:: install-swift-config-common.inc Finalize installation --------------------- * Restart the Object Storage proxy service: .. code-block:: console # service swift-proxy restart ceilometer-10.0.0/doc/source/install/swift/install-swift-prereq-common.inc0000666000175100017510000000200613236733243026676 0ustar zuulzuul00000000000000Prerequisites ------------- The Telemetry service requires access to the Object Storage service using the ``ResellerAdmin`` role. Perform these steps on the controller node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands. .. code-block:: console $ . admin-openrc #. Create the ``ResellerAdmin`` role: .. code-block:: console $ openstack role create ResellerAdmin +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | None | | id | 462fa46c13fd4798a95a3bfbe27b5e54 | | name | ResellerAdmin | +-----------+----------------------------------+ #. Add the ``ResellerAdmin`` role to the ``ceilometer`` user: .. code-block:: console $ openstack role add --project service --user ceilometer ResellerAdmin .. note:: This command provides no output. ceilometer-10.0.0/doc/source/install/swift/install-swift-rdo.rst0000666000175100017510000000130413236733243024735 0ustar zuulzuul00000000000000Enable Object Storage meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of polling and notifications to collect Object Storage meters. .. note:: Your environment must include the Object Storage service. .. include:: install-swift-prereq-common.inc Install components ------------------ * Install the packages: .. code-block:: console # yum install python-ceilometermiddleware .. include:: install-swift-config-common.inc Finalize installation --------------------- * Restart the Object Storage proxy service: .. 
code-block:: console # systemctl restart openstack-swift-proxy.service ceilometer-10.0.0/doc/source/install/index.rst0000666000175100017510000000126313236733243021332 0ustar zuulzuul00000000000000================== Installation Guide ================== .. toctree:: :maxdepth: 2 get_started.rst install-compute.rst install-controller.rst verify.rst next-steps.rst This chapter assumes a working setup of OpenStack following the `OpenStack Installation Tutorials and Guides `_. Ocata ~~~~~ To install Ceilometer, see the Ocata Telemetry Data Collection service `install guide `__ Newton ~~~~~~ To install Ceilometer, see the Newton Telemetry Data Collection service `install guide `__ ceilometer-10.0.0/doc/source/install/install-compute.rst0000666000175100017510000000066713236733243023352 0ustar zuulzuul00000000000000.. _install_compute: Install and Configure Compute Services ====================================== This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 install-compute-obs.rst install-compute-rdo.rst install-compute-ubuntu.rst ceilometer-10.0.0/doc/source/install/install-compute-obs.rst0000666000175100017510000000156513236733243024131 0ustar zuulzuul00000000000000Enable Compute service meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of notifications and an agent to collect Compute meters. Perform these steps on each compute node. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper install openstack-ceilometer-agent-compute .. include:: install-compute-common.inc Finalize installation --------------------- #. Start the agent and configure it to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-agent-compute.service # systemctl start openstack-ceilometer-agent-compute.service #. Restart the Compute service: .. code-block:: console # systemctl restart openstack-nova-compute.service ceilometer-10.0.0/doc/source/install/install-base-prereq-common.inc0000666000175100017510000001244313236733243025326 0ustar zuulzuul000000000000001. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc 2. To create the service credentials, complete these steps: * Create the ``ceilometer`` user: .. code-block:: console $ openstack user create --domain default --password-prompt ceilometer User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | e0353a670a9e496da891347c589539e9 | | enabled | True | | id | c859c96f57bd4989a8ea1a0b1d8ff7cd | | name | ceilometer | +-----------+----------------------------------+ * Add the ``admin`` role to the ``ceilometer`` user. .. code-block:: console $ openstack role add --project service --user ceilometer admin .. note:: This command provides no output. 3. Register Gnocchi service in Keystone: * Create the ``gnocchi`` user: .. 
code-block:: console $ openstack user create --domain default --password-prompt gnocchi User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | e0353a670a9e496da891347c589539e9 | | enabled | True | | id | 8bacd064f6434ef2b6bbfbedb79b0318 | | name | gnocchi | +-----------+----------------------------------+ * Create the ``gnocchi`` service entity: .. code-block:: console $ openstack service create --name gnocchi \ --description "Metric Service" metric +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Metric Service | | enabled | True | | id | 205978b411674e5a9990428f81d69384 | | name | gnocchi | | type | metric | +-------------+----------------------------------+ * Add the ``admin`` role to the ``gnocchi`` user. .. code-block:: console $ openstack role add --project service --user gnocchi admin .. note:: This command provides no output. * Create the Metric service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ metric public http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b808b67b848d443e9eaaa5e5d796970c | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ metric internal http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | c7009b1c2ee54b71b771fa3d0ae4f948 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ metric admin http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b2c00566d0604551b5fe1540c699db3d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ ceilometer-10.0.0/doc/source/install/cinder/0000775000175100017510000000000013236733440020730 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/cinder/install-cinder-rdo.rst0000666000175100017510000000207613236733243025164 0ustar zuulzuul00000000000000Enable Block Storage meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Block Storage service meters. Perform these steps on the controller and Block Storage nodes. .. note:: Your environment must include the Block Storage service. Configure Cinder to use Telemetry --------------------------------- Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[oslo_messaging_notifications]`` section, configure notifications: .. 
code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 .. include:: install-cinder-config-common.inc Finalize installation --------------------- #. Restart the Block Storage services on the controller node: .. code-block:: console # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service #. Restart the Block Storage services on the storage nodes: .. code-block:: console # systemctl restart openstack-cinder-volume.service ceilometer-10.0.0/doc/source/install/cinder/install-cinder-ubuntu.rst0000666000175100017510000000174213236733243025721 0ustar zuulzuul00000000000000Enable Block Storage meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Block Storage service meters. Perform these steps on the controller and Block Storage nodes. .. note:: Your environment must include the Block Storage service. Configure Cinder to use Telemetry --------------------------------- Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[oslo_messaging_notifications]`` section, configure notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 .. include:: install-cinder-config-common.inc Finalize installation --------------------- #. Restart the Block Storage services on the controller node: .. code-block:: console # service cinder-api restart # service cinder-scheduler restart #. Restart the Block Storage services on the storage nodes: .. code-block:: console # service cinder-volume restart ceilometer-10.0.0/doc/source/install/cinder/install-cinder-config-common.inc0000666000175100017510000000115413236733243027070 0ustar zuulzuul00000000000000* Enable periodic usage statistics relating to block storage. To use it, you must run this command in the following format: .. code-block:: console $ cinder-volume-usage-audit --start_time='YYYY-MM-DD HH:MM:SS' \ --end_time='YYYY-MM-DD HH:MM:SS' --send_actions This script outputs what volumes or snapshots were created, deleted, or exists in a given period of time and some information about these volumes or snapshots. Using this script via cron you can get notifications periodically, for example, every 5 minutes:: */5 * * * * /path/to/cinder-volume-usage-audit --send_actions ceilometer-10.0.0/doc/source/install/cinder/install-cinder-obs.rst0000666000175100017510000000207413236733243025161 0ustar zuulzuul00000000000000Enable Block Storage meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Block Storage service meters. Perform these steps on the controller and Block Storage nodes. .. note:: Your environment must include the Block Storage service. Configure Cinder to use Telemetry --------------------------------- Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[oslo_messaging_notifications]`` section, configure notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 .. include:: install-cinder-config-common.inc Finalize installation --------------------- #. Restart the Block Storage services on the controller node: .. code-block:: console # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service #. Restart the Block Storage services on the storage nodes: .. 
code-block:: console # systemctl restart openstack-cinder-volume.service ceilometer-10.0.0/doc/source/install/heat/0000775000175100017510000000000013236733440020405 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/install/heat/install-heat-rdo.rst0000666000175100017510000000153513236733243024315 0ustar zuulzuul00000000000000Enable Orchestration service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Orchestration service meters. Perform these steps on the controller node. Configure the Orchestration service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/heat/heat.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Orchestration service: .. code-block:: console # systemctl restart openstack-heat-api.service \ openstack-heat-api-cfn.service openstack-heat-engine.service ceilometer-10.0.0/doc/source/install/heat/install-heat-obs.rst0000666000175100017510000000153313236733243024312 0ustar zuulzuul00000000000000Enable Orchestration service meters for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Orchestration service meters. Perform these steps on the controller node. Configure the Orchestration service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/heat/heat.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Orchestration service: .. code-block:: console # systemctl restart openstack-heat-api.service \ openstack-heat-api-cfn.service openstack-heat-engine.service ceilometer-10.0.0/doc/source/install/heat/install-heat-ubuntu.rst0000666000175100017510000000142013236733243025044 0ustar zuulzuul00000000000000Enable Orchestration service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Orchestration service meters. Perform these steps on the controller node. Configure the Orchestration service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/heat/heat.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Orchestration service: .. code-block:: console # service heat-api restart # service heat-api-cfn restart # service heat-engine restart ceilometer-10.0.0/doc/source/admin/0000775000175100017510000000000013236733440017106 5ustar zuulzuul00000000000000ceilometer-10.0.0/doc/source/admin/telemetry-events.rst0000666000175100017510000001456413236733243023171 0ustar zuulzuul00000000000000====== Events ====== In addition to meters, the Telemetry service collects events triggered within an OpenStack environment. This section provides a brief summary of the events format in the Telemetry service. 
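For orientation, the sketch below shows approximately what a single event looks like once a notification has been flattened into traits. The attribute names follow the event structure described later in this section; the identifiers, timestamp, and trait values are purely illustrative, and the event is rendered as JSON only for readability.

.. code-block:: json

   {
       "event_type": "compute.instance.create.end",
       "message_id": "9f2d7c62-5b1a-4c29-8f44-1a3e6c0d9b7e",
       "generated": "2018-02-12T10:00:05.123456",
       "traits": [
           {"name": "instance_id", "type": "text", "value": "bb64dd22-0a8f-4d0c-9b6e-3f2a1c4d5e6f"},
           {"name": "project_id", "type": "text", "value": "7a1b2c3d4e5f60718293a4b5c6d7e8f9"},
           {"name": "memory_mb", "type": "int", "value": 512}
       ],
       "raw": {}
   }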
While a sample represents a single, numeric datapoint within a time-series, an event is a broader concept that represents the state of a resource at a point in time. The state may be described using various data types including non-numeric data such as an instance's flavor. In general, events represent any action made in the OpenStack system. Event configuration ~~~~~~~~~~~~~~~~~~~ By default, ceilometer builds event data from the messages it receives from other OpenStack services. .. note:: In releases older than Ocata, it is advisable to set ``disable_non_metric_meters`` to ``True`` when enabling events in the Telemetry service. The Telemetry service historically represented events as metering data, which may create duplication of data if both events and non-metric meters are enabled. Event structure ~~~~~~~~~~~~~~~ Events captured by the Telemetry service are represented by five key attributes: event\_type A dotted string defining what event occurred such as ``"compute.instance.resize.start"``. message\_id A UUID for the event. generated A timestamp of when the event occurred in the system. traits A flat mapping of key-value pairs which describe the event. The event's traits contain most of the details of the event. Traits are typed, and can be strings, integers, floats, or datetimes. raw Mainly for auditing purpose, the full event message can be stored (unindexed) for future evaluation. Event indexing ~~~~~~~~~~~~~~ The general philosophy of notifications in OpenStack is to emit any and all data someone might need, and let the consumer filter out what they are not interested in. In order to make processing simpler and more efficient, the notifications are stored and processed within Ceilometer as events. The notification payload, which can be an arbitrarily complex JSON data structure, is converted to a flat set of key-value pairs. This conversion is specified by a config file. .. note:: The event format is meant for efficient processing and querying. Storage of complete notifications for auditing purposes can be enabled by configuring ``store_raw`` option. Event conversion ---------------- The conversion from notifications to events is driven by a configuration file defined by the ``definitions_cfg_file`` in the ``ceilometer.conf`` configuration file. This includes descriptions of how to map fields in the notification body to Traits, and optional plug-ins for doing any programmatic translations (splitting a string, forcing case). The mapping of notifications to events is defined per event\_type, which can be wildcarded. Traits are added to events if the corresponding fields in the notification exist and are non-null. .. note:: The default definition file included with the Telemetry service contains a list of known notifications and useful traits. The mappings provided can be modified to include more or less data according to user requirements. If the definitions file is not present, a warning will be logged, but an empty set of definitions will be assumed. By default, any notifications that do not have a corresponding event definition in the definitions file will be converted to events with a set of minimal traits. This can be changed by setting the option ``drop_unmatched_notifications`` in the ``ceilometer.conf`` file. If this is set to ``True``, any unmapped notifications will be dropped. The basic set of traits (all are TEXT type) that will be added to all events if the notification has the relevant data are: service (notification's publisher), tenant\_id, and request\_id. 
These do not have to be specified in the event definition, they are automatically added, but their definitions can be overridden for a given event\_type. Event definitions format ------------------------ The event definitions file is in YAML format. It consists of a list of event definitions, which are mappings. Order is significant, the list of definitions is scanned in reverse order to find a definition which matches the notification's event\_type. That definition will be used to generate the event. The reverse ordering is done because it is common to want to have a more general wildcarded definition (such as ``compute.instance.*``) with a set of traits common to all of those events, with a few more specific event definitions afterwards that have all of the above traits, plus a few more. Each event definition is a mapping with two keys: event\_type This is a list (or a string, which will be taken as a 1 element list) of event\_types this definition will handle. These can be wildcarded with unix shell glob syntax. An exclusion listing (starting with a ``!``) will exclude any types listed from matching. If only exclusions are listed, the definition will match anything not matching the exclusions. traits This is a mapping, the keys are the trait names, and the values are trait definitions. Each trait definition is a mapping with the following keys: fields A path specification for the field(s) in the notification you wish to extract for this trait. Specifications can be written to match multiple possible fields. By default the value will be the first such field. The paths can be specified with a dot syntax (``payload.host``). Square bracket syntax (``payload[host]``) is also supported. In either case, if the key for the field you are looking for contains special characters, like ``.``, it will need to be quoted (with double or single quotes): ``payload.image_meta.`org.openstack__1__architecture```. The syntax used for the field specification is a variant of `JSONPath `__ type (Optional) The data type for this trait. Valid options are: ``text``, ``int``, ``float``, and ``datetime``. Defaults to ``text`` if not specified. plugin (Optional) Used to execute simple programmatic conversions on the value in a notification field. Event delivery to external sinks -------------------------------- You can configure the Telemetry service to deliver the events into external sinks. These sinks are configurable in the ``/etc/ceilometer/event_pipeline.yaml`` file. ceilometer-10.0.0/doc/source/admin/telemetry-system-architecture.rst0000666000175100017510000000603013236733243025656 0ustar zuulzuul00000000000000.. _telemetry-system-architecture: =================== System architecture =================== The Telemetry service uses an agent-based architecture. Several modules combine their responsibilities to collect, normalize, and redirect data to be used for use cases such as metering, monitoring, and alerting. The Telemetry service is built from the following agents: ceilometer-polling Polls for different kinds of meter data by using the polling plug-ins (pollsters) registered in different namespaces. It provides a single polling interface across different namespaces. .. note:: The ``ceilometer-polling`` service provides polling support on any namespace but many distributions continue to provide namespace-scoped agents: ``ceilometer-agent-central``, ``ceilometer-agent-compute``, and ``ceilometer-agent-ipmi``. 
ceilometer-agent-notification Consumes AMQP messages from other OpenStack services, normalizes messages, and publishes them to configured targets. Except for the ``ceilometer-polling`` agents polling the ``compute`` or ``ipmi`` namespaces, all the other services are placed on one or more controller nodes. The Telemetry architecture depends on the AMQP service both for consuming notifications coming from OpenStack services and internal communication. .. _telemetry-supported-databases: Supported databases ~~~~~~~~~~~~~~~~~~~ The other key external component of Telemetry is the database, where events, samples, alarm definitions, and alarms are stored. Each of the data models have their own storage service and each support various back ends. The list of supported base back ends for measurements: - `gnocchi `__ The list of supported base back ends for alarms: - `MySQL `__ - `PostgreSQL `__ The list of supported base back ends for events: - `ElasticSearch `__ - `MongoDB `__ - `MySQL `__ - `PostgreSQL `__ .. _telemetry-supported-hypervisors: Supported hypervisors ~~~~~~~~~~~~~~~~~~~~~ The Telemetry service collects information about the virtual machines, which requires close connection to the hypervisor that runs on the compute hosts. The following is a list of supported hypervisors. - `Libvirt supported hypervisors `__ such as KVM and QEMU - `Hyper-V `__ - `XEN `__ - `VMware vSphere `__ .. note:: For details about hypervisor support in libvirt please see the `Libvirt API support matrix `__. Supported networking services ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry is able to retrieve information from external networking services: - SDN controller meters: - `OpenDaylight `__ - `OpenContrail `__ ceilometer-10.0.0/doc/source/admin/telemetry-data-pipelines.rst0000666000175100017510000004223413236733243024557 0ustar zuulzuul00000000000000.. _telemetry-data-pipelines: ============================= Data processing and pipelines ============================= The mechanism by which data is processed is called a pipeline. Pipelines, at the configuration level, describe a coupling between sources of data and the corresponding sinks for transformation and publication of data. This functionality is handled by the notification agents. A source is a producer of data: ``samples`` or ``events``. In effect, it is a set of notification handlers emitting datapoints for a set of matching meters and event types. Each source configuration encapsulates name matching and mapping to one or more sinks for publication. A sink, on the other hand, is a consumer of data, providing logic for the transformation and publication of data emitted from related sources. In effect, a sink describes a chain of handlers. The chain starts with zero or more transformers and ends with one or more publishers. The first transformer in the chain is passed data from the corresponding source, takes some action such as deriving rate of change, performing unit conversion, or aggregating, before publishing_. .. _telemetry-pipeline-configuration: Pipeline configuration ~~~~~~~~~~~~~~~~~~~~~~ The notification agent supports two pipelines: one that handles samples and another that handles events. The pipelines can be enabled and disabled by setting `pipelines` option in the `[notifications]` section. The actual configuration of each pipelines is, by default, stored in separate configuration files: ``pipeline.yaml`` and ``event_pipeline.yaml``. 
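As a minimal sketch, a ``ceilometer.conf`` fragment that keeps both pipelines enabled could look like the following. The section is shown here as ``[notification]`` and the value as a comma-separated list; confirm the exact section name and value format against the configuration reference for your release before relying on it.

.. code-block:: ini

   [notification]
   # Process both samples and events; removing an entry from this list
   # disables the corresponding pipeline.
   pipelines = meter,event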
The location of the configuration files can be set by the ``pipeline_cfg_file`` and ``event_pipeline_cfg_file`` options listed in :ref:`configuring` The meter pipeline definition looks like: .. code-block:: yaml --- sources: - name: 'source name' meters: - 'meter filter' sinks: - 'sink name' sinks: - name: 'sink name' transformers: 'definition of transformers' publishers: - 'list of publishers' There are several ways to define the list of meters for a pipeline source. The list of valid meters can be found in :ref:`telemetry-measurements`. There is a possibility to define all the meters, or just included or excluded meters, with which a source should operate: - To include all meters, use the ``*`` wildcard symbol. It is highly advisable to select only the meters that you intend on using to avoid flooding the metering database with unused data. - To define the list of meters, use either of the following: - To define the list of included meters, use the ``meter_name`` syntax. - To define the list of excluded meters, use the ``!meter_name`` syntax. .. note:: The OpenStack Telemetry service does not have any duplication check between pipelines, and if you add a meter to multiple pipelines then it is assumed the duplication is intentional and may be stored multiple times according to the specified sinks. The above definition methods can be used in the following combinations: - Use only the wildcard symbol. - Use the list of included meters. - Use the list of excluded meters. - Use wildcard symbol with the list of excluded meters. .. note:: At least one of the above variations should be included in the meters section. Included and excluded meters cannot co-exist in the same pipeline. Wildcard and included meters cannot co-exist in the same pipeline definition section. The transformers section of a pipeline sink provides the possibility to add a list of transformer definitions. The available transformers are: .. list-table:: :widths: 50 50 :header-rows: 1 * - Name of transformer - Reference name for configuration * - Accumulator - accumulator * - Aggregator - aggregator * - Arithmetic - arithmetic * - Rate of change - rate\_of\_change * - Unit conversion - unit\_conversion * - Delta - delta The publishers section contains the list of publishers, where the samples data should be sent after the possible transformations. Similarly, the event pipeline definition looks like: .. code-block:: yaml --- sources: - name: 'source name' events: - 'event filter' sinks: - 'sink name' sinks: - name: 'sink name' publishers: - 'list of publishers' The event filter uses the same filtering logic as the meter pipeline. .. _telemetry-transformers: Transformers ------------ .. note:: Transformers maintain data in memory and therefore do not guarantee durability in certain scenarios. A more durable and efficient solution may be achieved post-storage using solutions like Gnocchi. The definition of transformers can contain the following fields: name Name of the transformer. parameters Parameters of the transformer. The parameters section can contain transformer specific fields, like source and target fields with different subfields in case of the rate of change, which depends on the implementation of the transformer. The following are supported transformers: Rate of change transformer `````````````````````````` Transformer that computes the change in value between two data points in time. In the case of the transformer that creates the ``cpu_util`` meter, the definition looks like: .. 
code-block:: yaml transformers: - name: "rate_of_change" parameters: target: name: "cpu_util" unit: "%" type: "gauge" scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" The rate of change transformer generates the ``cpu_util`` meter from the sample values of the ``cpu`` counter, which represents cumulative CPU time in nanoseconds. The transformer definition above defines a scale factor (for nanoseconds and multiple CPUs), which is applied before the transformation derives a sequence of gauge samples with unit ``%``, from sequential values of the ``cpu`` meter. The definition for the disk I/O rate, which is also generated by the rate of change transformer: .. code-block:: yaml transformers: - name: "rate_of_change" parameters: source: map_from: name: "disk\\.(read|write)\\.(bytes|requests)" unit: "(B|request)" target: map_to: name: "disk.\\1.\\2.rate" unit: "\\1/s" type: "gauge" Unit conversion transformer ``````````````````````````` Transformer to apply a unit conversion. It takes the volume of the meter and multiplies it with the given ``scale`` expression. Also supports ``map_from`` and ``map_to`` like the rate of change transformer. Sample configuration: .. code-block:: yaml transformers: - name: "unit_conversion" parameters: target: name: "disk.kilobytes" unit: "KB" scale: "volume * 1.0 / 1024.0" With ``map_from`` and ``map_to``: .. code-block:: yaml transformers: - name: "unit_conversion" parameters: source: map_from: name: "disk\\.(read|write)\\.bytes" target: map_to: name: "disk.\\1.kilobytes" scale: "volume * 1.0 / 1024.0" unit: "KB" Aggregator transformer `````````````````````` A transformer that sums up the incoming samples until enough samples have come in or a timeout has been reached. Timeout can be specified with the ``retention_time`` option. If you want to flush the aggregation, after a set number of samples have been aggregated, specify the size parameter. The volume of the created sample is the sum of the volumes of samples that came into the transformer. Samples can be aggregated by the attributes ``project_id``, ``user_id`` and ``resource_metadata``. To aggregate by the chosen attributes, specify them in the configuration and set which value of the attribute to take for the new sample (first to take the first sample's attribute, last to take the last sample's attribute, and drop to discard the attribute). To aggregate 60s worth of samples by ``resource_metadata`` and keep the ``resource_metadata`` of the latest received sample: .. code-block:: yaml transformers: - name: "aggregator" parameters: retention_time: 60 resource_metadata: last To aggregate each 15 samples by ``user_id`` and ``resource_metadata`` and keep the ``user_id`` of the first received sample and drop the ``resource_metadata``: .. code-block:: yaml transformers: - name: "aggregator" parameters: size: 15 user_id: first resource_metadata: drop Accumulator transformer ``````````````````````` This transformer simply caches the samples until enough samples have arrived and then flushes them all down the pipeline at once: .. code-block:: yaml transformers: - name: "accumulator" parameters: size: 15 Multi meter arithmetic transformer `````````````````````````````````` This transformer enables us to perform arithmetic calculations over one or more meters and/or their metadata, for example:: memory_util = 100 * memory.usage / memory A new sample is created with the properties described in the ``target`` section of the transformer's configuration. 
The sample's volume is the result of the provided expression. The calculation is performed on samples from the same resource. .. note:: The calculation is limited to meters with the same interval. Example configuration: .. code-block:: yaml transformers: - name: "arithmetic" parameters: target: name: "memory_util" unit: "%" type: "gauge" expr: "100 * $(memory.usage) / $(memory)" To demonstrate the use of metadata, the following implementation of a novel meter shows average CPU time per core: .. code-block:: yaml transformers: - name: "arithmetic" parameters: target: name: "avg_cpu_per_core" unit: "ns" type: "cumulative" expr: "$(cpu) / ($(cpu).resource_metadata.cpu_number or 1)" .. note:: Expression evaluation gracefully handles NaNs and exceptions. In such a case it does not create a new sample but only logs a warning. Delta transformer ````````````````` This transformer calculates the change between two sample datapoints of a resource. It can be configured to capture only the positive growth deltas. Example configuration: .. code-block:: yaml transformers: - name: "delta" parameters: target: name: "cpu.delta" growth_only: True .. _publishing: Publishers ---------- The Telemetry service provides several transport methods to transfer the collected data to an external system. The consumers of this data are widely different, such as monitoring systems, for which data loss is acceptable, and billing systems, which require reliable data transport. Telemetry provides methods to fulfill the requirements of both kinds of systems. The publisher component makes it possible to save the data into persistent storage through the message bus or to send it to one or more external consumers. One chain can contain multiple publishers. To serve these different consumers, the multi-publisher can be configured for each data point within the Telemetry service, allowing the same technical meter or event to be published multiple times to multiple destinations, each potentially using a different transport. The following publisher types are supported: gnocchi (default) ````````````````` When the gnocchi publisher is enabled, measurement and resource information is pushed to gnocchi for time-series optimized storage. Gnocchi must be registered in the Identity service, as Ceilometer discovers the exact path via the Identity service. More details on how to enable and configure gnocchi can be found on its `official documentation page `__. panko ````` Event data in Ceilometer can be stored in panko, which provides an HTTP REST interface to query system events in OpenStack. To push data to panko, set the publisher to ``panko://``. notifier ```````` The notifier publisher can be specified in the form of ``notifier://?option1=value1&option2=value2``. It emits data over AMQP using oslo.messaging. Any consumer can then subscribe to the published topic for additional processing. The following customization options are available: ``per_meter_topic`` The value of this parameter is 1. It is used for publishing the samples on an additional ``metering_topic.sample_name`` topic queue besides the default ``metering_topic`` queue. ``policy`` Used for configuring the behavior for the case when the publisher fails to send the samples. The possible predefined values are: default Used for waiting and blocking until the samples have been sent. drop Used for dropping the samples which failed to be sent.
queue Used for creating an in-memory queue and retrying to send the samples in the queue during the next sample publishing period (the queue length can be configured with ``max_queue_length``, where 1024 is the default value). ``topic`` The topic name of the queue to publish to. Setting this will override the default topic defined by the ``metering_topic`` and ``event_topic`` options. This option can be used to support multiple consumers. udp ``` This publisher can be specified in the form of ``udp://<host>:<port>/``. It emits metering data over UDP. file ```` The file publisher can be specified in the form of ``file://path?option1=value1&option2=value2``. This publisher records metering data into a file. .. note:: If a file name and location are not specified, the ``file`` publisher does not log any meters; instead it logs a warning message in the configured log file for Telemetry. The following options are available for the ``file`` publisher: ``max_bytes`` When this option is greater than zero, it will cause a rollover. When the specified size is about to be exceeded, the file is closed and a new file is silently opened for output. If its value is zero, rollover never occurs. ``backup_count`` If this value is non-zero, an extension will be appended to the filename of the old log, as '.1', '.2', and so forth until the specified value is reached. The file that is written and contains the newest data is always the one that is specified without any extensions. http ```` The Telemetry service supports sending samples to an external HTTP target. The samples are sent without any modification. To set this option as the notification agents' target, set ``http://`` as a publisher endpoint in the pipeline definition files. The HTTP target should be set along with the publisher declaration. For example, additional configuration options can be passed in: ``http://localhost:80/?option1=value1&option2=value2`` The following options are available: ``timeout`` The number of seconds before the HTTP request times out. ``max_retries`` The number of times to retry a request before failing. ``batch`` If false, the publisher will send each sample and event individually, whether or not the notification agent is configured to process in batches. ``verify_ssl`` If false, SSL certificate verification is disabled. The default publisher is ``gnocchi``, without any additional options specified. A sample ``publishers`` section in the ``/etc/ceilometer/pipeline.yaml`` looks like the following: .. code-block:: yaml publishers: - gnocchi:// - panko:// - udp://10.0.0.2:1234 - notifier://?policy=drop&max_queue_length=512&topic=custom_target Pipeline Partitioning ~~~~~~~~~~~~~~~~~~~~~ .. note:: Partitioning is only required if pipelines contain transformations. It has the secondary benefit of supporting batching in certain publishers. On large workloads, multiple notification agents can be deployed to handle the flood of incoming messages from monitored services. If transformations are enabled in the pipeline, the notification agents must be coordinated to ensure related messages are routed to the same agent. To enable coordination, set the ``workload_partitioning`` value in the ``[notification]`` section. To distribute messages across agents, the ``pipeline_processing_queues`` option should be set. This value defines how many pipeline queues to create, which will then be distributed to the active notification agents. It is recommended that the number of processing queues, at the very least, match the number of agents.
Increasing the number of processing queues will improve the distribution of messages across the agents. It will also help batching, which minimizes the requests to the Gnocchi storage backend. It will also increase the load on the message queue as it uses the queue to shard data. .. warning:: Decreasing the number of processing queues may result in lost data as any previously created queues may no longer be assigned to active agents. It is only recommended that you **increase** processing queues. ceilometer-10.0.0/doc/source/admin/telemetry-data-collection.rst0000666000175100017510000002776113236733243024722 0ustar zuulzuul00000000000000.. _telemetry-data-collection: =============== Data collection =============== The main responsibility of Telemetry in OpenStack is to collect information about the system that can be used by billing systems or interpreted by analytic tooling. Collected data can be stored in the form of samples or events in the supported databases, which are listed in :ref:`telemetry-supported-databases`. The available data collection mechanisms are: Notifications Processing notifications from other OpenStack services, by consuming messages from the configured message queue system. Polling Retrieving information directly from the hypervisor or from the host machine using SNMP, or by using the APIs of other OpenStack services. Notifications ============= All OpenStack services send notifications about the executed operations or system state. Several notifications carry information that can be metered, for example, the CPU time of a VM instance created by the OpenStack Compute service. The notification agent is responsible for consuming notifications. This component consumes from the message bus and transforms notifications into events and measurement samples. By default, the notification agent is configured to build both events and samples. To enable selective data models, set the required pipelines using the `pipelines` option under the `[notification]` section. Additionally, the notification agent is responsible for all data processing such as transformations and publishing. After processing, the data is sent to any supported publisher target such as gnocchi or panko. These services persist the data in configured databases. The different OpenStack services emit several notifications about the various types of events that happen in the system during normal operation. Not all these notifications are consumed by the Telemetry service, as the intention is only to capture the billable events and notifications that can be used for monitoring or profiling purposes. The notifications handled are contained under the `ceilometer.sample.endpoint` namespace. .. note:: Some services require additional configuration to emit the notifications. Please see the :ref:`install_controller` for more details. .. _meter_definitions: Meter definitions ----------------- The Telemetry service collects a subset of the meters by filtering notifications emitted by other OpenStack services. You can find the meter definitions in a separate configuration file, called ``ceilometer/data/meters.d/meters.yaml``. This enables operators/administrators to add new meters to the Telemetry project by updating the ``meters.yaml`` file without any need for additional code changes. .. note:: The ``meters.yaml`` file should be modified with care. Unless intended, do not remove any existing meter definitions from the file. Also, the collected meters can differ in some cases from what is referenced in the documentation.
It also supports loading multiple meter definition files and allows users to add their own meter definitions into several files, according to different types of metrics, under the ``/etc/ceilometer/meters.d`` directory. A standard meter definition looks like: .. code-block:: yaml --- metric: - name: 'meter name' event_type: 'event name' type: 'type of meter eg: gauge, cumulative or delta' unit: 'name of unit eg: MB' volume: 'path to a measurable value eg: $.payload.size' resource_id: 'path to resource id eg: $.payload.id' project_id: 'path to project id eg: $.payload.owner' metadata: 'additional key-value data describing resource' The definition above shows a simple meter definition with some fields, of which ``name``, ``event_type``, ``type``, ``unit``, and ``volume`` are required. If there is a match on the event type, samples are generated for the meter. The ``meters.yaml`` file contains the sample definitions for all the meters that Telemetry is collecting from notifications. The value of each field is specified by using a JSON path in order to find the right value in the notification message. In order to be able to specify the right field you need to be aware of the format of the consumed notification. The values that need to be searched in the notification message are set with a JSON path starting with ``$.``. For instance, if you need the ``size`` information from the payload you can define it like ``$.payload.size``. A notification message may contain multiple meters. You can use ``*`` in the meter definition to capture all the meters and generate samples accordingly. You can use wild cards as shown in the following example: .. code-block:: yaml --- metric: - name: $.payload.measurements.[*].metric.[*].name event_type: 'event_name.*' type: 'delta' unit: $.payload.measurements.[*].metric.[*].unit volume: payload.measurements.[*].result resource_id: $.payload.target user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id In the above example, the ``name`` field is a JSON path matching a list of meter names defined in the notification message. You can use complex operations on JSON paths. In the following example, the ``volume`` and ``resource_id`` fields use arithmetic and string concatenation: .. code-block:: yaml --- metric: - name: 'compute.node.cpu.idle.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename You can use the ``timedelta`` plug-in to evaluate the difference in seconds between two ``datetime`` fields from one notification. .. code-block:: yaml --- metric: - name: 'compute.instance.booting.time' event_type: 'compute.instance.create.end' type: 'gauge' unit: 'sec' volume: fields: [$.payload.created_at, $.payload.launched_at] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id .. _Polling-Configuration: Polling ======= The Telemetry service is intended to store a complex picture of the infrastructure. This goal requires more information than what is provided by the events and notifications published by each service. Some information is not emitted directly, such as the resource usage of the VM instances. Therefore, Telemetry uses another method to gather this data: polling the infrastructure, including the APIs of the different OpenStack services and other assets, such as hypervisors. The latter case requires closer interaction with the compute hosts.
To solve this issue, Telemetry uses an agent-based architecture to fulfill the data collection requirements. Configuration ------------- Polling rules are defined by the `polling.yaml` file. It defines the pollsters to enable and the interval at which they should be polled. Each source configuration encapsulates meter name matching, which is matched against the entry point of a pollster. It also includes the polling interval determination and optional resource enumeration or discovery. All samples generated by polling are placed on the queue to be handled by the pipeline configuration loaded in the notification agent. The polling definition may look like the following:: --- sources: - name: 'source name' interval: 'how often the samples should be generated' meters: - 'meter filter' resources: - 'list of resource URLs' discovery: - 'list of discoverers' The *interval* parameter in the sources section defines the cadence of sample generation in seconds. Polling plugins are invoked according to each source's section whose *meters* parameter matches the plugin's meter name. Its matching logic functions the same as pipeline filtering. The optional *resources* section of a polling source allows a list of static resource URLs to be configured. An amalgamated list of all statically defined resources is passed to individual pollsters for polling. The optional *discovery* section of a polling source contains the list of discoverers. These discoverers can be used to dynamically discover the resources to be polled by the pollsters. If both *resources* and *discovery* are set, the final resources passed to the pollsters will be the combination of the dynamic resources returned by the discoverers and the static resources defined in the *resources* section. Agents ------ There are three types of agents supporting the polling mechanism: the ``compute agent``, the ``central agent``, and the ``IPMI agent``. Under the hood, all the types of polling agents are the same ``ceilometer-polling`` agent, except that they load different polling plug-ins (pollsters) from different namespaces to gather data. The following subsections give further information regarding the architectural and configuration details of these components. Running :command:`ceilometer-agent-compute` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces compute Running :command:`ceilometer-agent-central` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces central Running :command:`ceilometer-agent-ipmi` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces ipmi Compute agent ~~~~~~~~~~~~~ This agent is responsible for collecting resource usage data of VM instances on individual compute nodes within an OpenStack deployment. This mechanism requires closer interaction with the hypervisor; therefore, a separate agent type fulfills the collection of the related meters, and it is placed on the host machines to retrieve this information locally. A Compute agent instance has to be installed on each and every compute node; installation instructions can be found in the :ref:`install_compute` section in the Installation Tutorials and Guides. The list of supported hypervisors can be found in :ref:`telemetry-supported-hypervisors`. The Compute agent uses the API of the hypervisor installed on the compute hosts.
Therefore, the supported meters may differ for each virtualization back end, as each inspection tool provides a different set of meters. The list of collected meters can be found in :ref:`telemetry-compute-meters`. The support column provides information about which meter is available for each hypervisor supported by the Telemetry service. Central agent ~~~~~~~~~~~~~ This agent is responsible for polling public REST APIs to retrieve additional information on OpenStack resources not already surfaced via notifications, and also for polling hardware resources over SNMP. Some of the services polled with this agent are: - OpenStack Networking - OpenStack Object Storage - OpenStack Block Storage - Hardware resources via SNMP To install and configure this service, use the :ref:`install_rdo` section in the Installation Tutorials and Guides. .. _telemetry-ipmi-agent: IPMI agent ~~~~~~~~~~ This agent is responsible for collecting IPMI sensor data and Intel Node Manager data on individual compute nodes within an OpenStack deployment. This agent requires an IPMI-capable node with the ipmitool utility installed, which is commonly used for IPMI control on various Linux distributions. An IPMI agent instance can be installed on each and every compute node with IPMI support, except when the node is managed by the Bare metal service and the ``conductor.send_sensor_data`` option is set to ``true`` in the Bare metal service. There is no harm in installing this agent on a compute node without IPMI or Intel Node Manager support, as the agent checks for the hardware and, if none is available, returns empty data. It is suggested that you install the IPMI agent only on an IPMI-capable node for performance reasons. The list of collected meters can be found in :ref:`telemetry-bare-metal-service`. .. note:: Do not deploy both the IPMI agent and the Bare metal service on one compute node. If ``conductor.send_sensor_data`` is set, this misconfiguration causes duplicated IPMI sensor samples. ceilometer-10.0.0/doc/source/admin/telemetry-best-practices.rst0000666000175100017510000000274513236733243024563 0ustar zuulzuul00000000000000Telemetry best practices ~~~~~~~~~~~~~~~~~~~~~~~~ The following are some suggested best practices to follow when deploying and configuring the Telemetry service. Data collection --------------- #. The Telemetry service collects a continuously growing set of data. Not all the data will be relevant for an administrator to monitor. - Based on your needs, you can edit the ``polling.yaml`` and ``pipeline.yaml`` configuration files to select which meters to generate or process (see the example at the end of this guide). - By default, the Telemetry service polls the service APIs every 10 minutes. You can change the polling interval on a per-meter basis by editing the ``polling.yaml`` configuration file. .. warning:: If the polling interval is too short, it will likely increase the stress on the service APIs. #. If polling many resources or at a high frequency, you can add additional central and compute agents as necessary. The agents are designed to scale horizontally. For more information refer to the `high availability guide `_. #. `workload_partitioning` of notification agents is only required if the pipeline configuration leverages transformers. It may also be enabled if batching is required to minimize load on the defined publisher targets. If transformers are not enabled, multiple agents may still be deployed without `workload_partitioning` and processing will be done greedily.
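As a minimal sketch of the first recommendation above, the following ``polling.yaml`` fragment polls only a small, explicitly named set of compute meters every 10 minutes instead of relying on the ``*`` wildcard. The source name and the selection of meters are illustrative assumptions; adjust them to the meters you actually consume:

.. code-block:: yaml

   ---
   sources:
       # Poll only the meters that downstream billing or monitoring uses.
       - name: selected_compute_meters
         interval: 600
         meters:
             - cpu
             - memory.usage
             - disk.read.bytes
             - disk.write.bytes

Restricting the meter list this way keeps the polling agents from generating samples that are never processed or stored, which is the main intent of the first practice listed above.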
ceilometer-10.0.0/doc/source/admin/telemetry-troubleshooting-guide.rst0000666000175100017510000000155113236733243026177 0ustar zuulzuul00000000000000Troubleshoot Telemetry ~~~~~~~~~~~~~~~~~~~~~~ Logging in Telemetry -------------------- The Telemetry service has similar log settings as the other OpenStack services. Multiple options are available to change the target of logging, the format of the log entries and the log levels. The log settings can be changed in ``ceilometer.conf``. The list of configuration options are listed in the logging configuration options table in the `Telemetry section `__ in the OpenStack Configuration Reference. By default ``stderr`` is used as standard output for the log messages. It can be changed to either a log file or syslog. The ``debug`` and ``verbose`` options are also set to false in the default settings, the default log levels of the corresponding modules can be found in the table referred above. ceilometer-10.0.0/doc/source/admin/index.rst0000666000175100017510000000075313236733243020757 0ustar zuulzuul00000000000000.. _admin: ===================== Administrator Guide ===================== Overview ======== .. toctree:: :maxdepth: 2 telemetry-system-architecture Configuration ============= .. toctree:: :maxdepth: 2 telemetry-data-collection telemetry-data-pipelines telemetry-best-practices Data Types ========== .. toctree:: :maxdepth: 2 telemetry-measurements telemetry-events Management ========== .. toctree:: :maxdepth: 2 telemetry-troubleshooting-guide ceilometer-10.0.0/doc/source/admin/telemetry-measurements.rst0000666000175100017510000021721613236733243024374 0ustar zuulzuul00000000000000.. _telemetry-measurements: ============ Measurements ============ The Telemetry service collects meters within an OpenStack deployment. This section provides a brief summary about meters format and origin and also contains the list of available meters. Telemetry collects meters by polling the infrastructure elements and also by consuming the notifications emitted by other OpenStack services. For more information about the polling mechanism and notifications see :ref:`telemetry-data-collection`. There are several meters which are collected by polling and by consuming. The origin for each meter is listed in the tables below. .. note:: You may need to configure Telemetry or other OpenStack services in order to be able to collect all the samples you need. For further information about configuration requirements see the `Telemetry chapter `__ in the Installation Tutorials and Guides. Also check the `Telemetry manual installation `__ description. Telemetry uses the following meter types: +--------------+--------------------------------------------------------------+ | Type | Description | +==============+==============================================================+ | Cumulative | Increasing over time (instance hours) | +--------------+--------------------------------------------------------------+ | Delta | Changing over time (bandwidth) | +--------------+--------------------------------------------------------------+ | Gauge | Discrete items (floating IPs, image uploads) and fluctuating | | | values (disk I/O) | +--------------+--------------------------------------------------------------+ | Telemetry provides the possibility to store metadata for samples. This metadata can be extended for OpenStack Compute and OpenStack Object Storage. In order to add additional metadata information to OpenStack Compute you have two options to choose from. 
The first one is to specify them when you boot up a new instance. The additional information will be stored with the sample in the form of ``resource_metadata.user_metadata.*``. The new field should be defined by using the prefix ``metering.``. The modified boot command look like the following: .. code-block:: console $ openstack server create --property metering.custom_metadata=a_value my_vm The other option is to set the ``reserved_metadata_keys`` to the list of metadata keys that you would like to be included in ``resource_metadata`` of the instance related samples that are collected for OpenStack Compute. This option is included in the ``DEFAULT`` section of the ``ceilometer.conf`` configuration file. You might also specify headers whose values will be stored along with the sample data of OpenStack Object Storage. The additional information is also stored under ``resource_metadata``. The format of the new field is ``resource_metadata.http_header_$name``, where ``$name`` is the name of the header with ``-`` replaced by ``_``. For specifying the new header, you need to set ``metadata_headers`` option under the ``[filter:ceilometer]`` section in ``proxy-server.conf`` under the ``swift`` folder. You can use this additional data for instance to distinguish external and internal users. Measurements are grouped by services which are polled by Telemetry or emit notifications that this service consumes. .. _telemetry-compute-meters: OpenStack Compute ~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Compute. +-----------+-------+------+----------+----------+---------+------------------+ | Name | Type | Unit | Resource | Origin | Support | Note | +===========+=======+======+==========+==========+=========+==================+ | **Meters added in the Mitaka release or earlier** | +-----------+-------+------+----------+----------+---------+------------------+ | memory | Gauge | MB | instance | Notific\ | Libvirt,| Volume of RAM | | | | | ID | ation | Hyper-V | allocated to the | | | | | | | | instance | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Gauge | MB | instance | Pollster | Libvirt,| Volume of RAM | | usage | | | ID | | Hyper-V,| used by the inst\| | | | | | | vSphere,| ance from the | | | | | | | XenAPI | amount of its | | | | | | | | allocated memory | +-----------+-------+------+----------+----------+---------+------------------+ | memory.r\ | Gauge | MB | instance | Pollster | Libvirt | Volume of RAM u\ | | esident | | | ID | | | sed by the inst\ | | | | | | | | ance on the phy\ | | | | | | | | sical machine | +-----------+-------+------+----------+----------+---------+------------------+ | cpu | Cumu\ | ns | instance | Pollster | Libvirt,| CPU time used | | | lative| | ID | | Hyper-V | | +-----------+-------+------+----------+----------+---------+------------------+ | cpu.delta | Delta | ns | instance | Pollster | Libvirt,| CPU time used s\ | | | | | ID | | Hyper-V | ince previous d\ | | | | | | | | atapoint | +-----------+-------+------+----------+----------+---------+------------------+ | cpu_util | Gauge | % | instance | Pollster | LibVirt,| Average CPU | | | | | ID | | vSphere,| utilization | | | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | vcpus | Gauge | vcpu | instance | Notific\ | Libvirt,| Number of virtual| | | | | ID | ation | Hyper-V | CPUs allocated to| | | | | | | | the instance | 
+-----------+-------+------+----------+----------+---------+------------------+ | disk.read\| Cumul\| req\ | instance | Pollster | Libvirt,| Number of read | | .requests | ative | uest | ID | | Hyper-V | requests | +-----------+-------+------+----------+----------+---------+------------------+ | disk.read\| Gauge | requ\| instance | Pollster | Libvirt,| Average rate of | | .requests\| | est/s| ID | | Hyper-V,| read requests | | .rate | | | | | vSphere | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.writ\| Cumul\| req\ | instance | Pollster | Libvirt,| Number of write | | e.requests| ative | uest | ID | | Hyper-V | requests | +-----------+-------+------+----------+----------+---------+------------------+ | disk.writ\| Gauge | requ\| instance | Pollster | Libvirt,| Average rate of | | e.request\| | est/s| ID | | Hyper-V,| write requests | | s.rate | | | | | vSphere | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.read\| Cumu\ | B | instance | Pollster | Libvirt,| Volume of reads | | .bytes | lative| | ID | | Hyper-V | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.read\| Gauge | B/s | instance | Pollster | Libvirt,| Average rate of | | .bytes.\ | | | ID | | Hyper-V,| reads | | rate | | | | | vSphere,| | | | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.writ\| Cumu\ | B | instance | Pollster | Libvirt,| Volume of writes | | e.bytes | lative| | ID | | Hyper-V | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.writ\| Gauge | B/s | instance | Pollster | Libvirt,| Average rate of | | e.bytes.\ | | | ID | | Hyper-V,| writes | | rate | | | | | vSphere,| | | | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of read | | ice.read\ | lative| uest | | | Hyper-V | requests | | .requests | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | requ\| disk ID | Pollster | Libvirt,| Average rate of | | ice.read\ | | est/s| | | Hyper-V,| read requests | | .requests\| | | | | vSphere | | | .rate | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of write | | ice.write\| lative| uest | | | Hyper-V | requests | | .requests | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | requ\| disk ID | Pollster | Libvirt,| Average rate of | | ice.write\| | est/s| | | Hyper-V,| write requests | | .requests\| | | | | vSphere | | | .rate | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of reads | | ice.read\ | lative| | | | Hyper-V | | | .bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B/s | disk ID | Pollster | Libvirt,| Average rate of | | ice.read\ | | | | | Hyper-V,| reads | | .bytes | | | | | vSphere | | | .rate | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of writes | | ice.write\| lative| | | | Hyper-V | | | .bytes | | | | | | | 
+-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B/s | disk ID | Pollster | Libvirt,| Average rate of | | ice.write\| | | | | Hyper-V,| writes | | .bytes | | | | | vSphere | | | .rate | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.root\| Gauge | GB | instance | Notific\ | Libvirt,| Size of root disk| | .size | | | ID | ation | Hyper-V | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.ephe\| Gauge | GB | instance | Notific\ | Libvirt,| Size of ephemeral| | meral.size| | | ID | ation | Hyper-V | disk | +-----------+-------+------+----------+----------+---------+------------------+ | disk.lat\ | Gauge | ms | instance | Pollster | Hyper-V | Average disk la\ | | ency | | | ID | | | tency | +-----------+-------+------+----------+----------+---------+------------------+ | disk.iop\ | Gauge | coun\| instance | Pollster | Hyper-V | Average disk io\ | | s | | t/s | ID | | | ps | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | ms | disk ID | Pollster | Hyper-V | Average disk la\ | | ice.late\ | | | | | | tency per device | | ncy | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | coun\| disk ID | Pollster | Hyper-V | Average disk io\ | | ice.iops | | t/s | | | | ps per device | +-----------+-------+------+----------+----------+---------+------------------+ | disk.cap\ | Gauge | B | instance | Pollster | Libvirt | The amount of d\ | | acity | | | ID | | | isk that the in\ | | | | | | | | stance can see | +-----------+-------+------+----------+----------+---------+------------------+ | disk.all\ | Gauge | B | instance | Pollster | Libvirt | The amount of d\ | | ocation | | | ID | | | isk occupied by | | | | | | | | the instance o\ | | | | | | | | n the host mach\ | | | | | | | | ine | +-----------+-------+------+----------+----------+---------+------------------+ | disk.usa\ | Gauge | B | instance | Pollster | Libvirt | The physical si\ | | ge | | | ID | | | ze in bytes of | | | | | | | | the image conta\ | | | | | | | | iner on the host | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | | ice.capa\ | | | | | | isk per device | | city | | | | | | that the instan\ | | | | | | | | ce can see | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | | ice.allo\ | | | | | | isk per device | | cation | | | | | | occupied by the | | | | | | | | instance on th\ | | | | | | | | e host machine | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The physical si\ | | ice.usag\ | | | | | | ze in bytes of | | e | | | | | | the image conta\ | | | | | | | | iner on the hos\ | | | | | | | | t per device | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of | | incoming.\| lative| | ID | | Hyper-V | incoming bytes | | bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Gauge | B/s | interface| Pollster | Libvirt,| Average rate of | | incoming.\| | | ID | | Hyper-V,| incoming bytes | | bytes.rate| | | | | vSphere,| 
| | | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of | | outgoing\ | lative| | ID | | Hyper-V | outgoing bytes | | .bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Gauge | B/s | interface| Pollster | Libvirt,| Average rate of | | outgoing.\| | | ID | | Hyper-V,| outgoing bytes | | bytes.rate| | | | | vSphere,| | | | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of | | incoming\ | lative| ket | ID | | Hyper-V | incoming packets | | .packets | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Gauge | pack\| interface| Pollster | Libvirt,| Average rate of | | incoming\ | | et/s | ID | | Hyper-V,| incoming packets | | .packets\ | | | | | vSphere,| | | .rate | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of | | outgoing\ | lative| ket | ID | | Hyper-V | outgoing packets | | .packets | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Gauge | pac\ | interface| Pollster | Libvirt,| Average rate of | | outgoing\ | | ket/s| ID | | Hyper-V,| outgoing packets | | .packets\ | | | | | vSphere,| | | .rate | | | | | XenAPI | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Newton release** | +-----------+-------+------+----------+----------+---------+------------------+ | cpu_l3_c\ | Gauge | B | instance | Pollster | Libvirt | L3 cache used b\ | | ache | | | ID | | | y the instance | +-----------+-------+------+----------+----------+---------+------------------+ | memory.b\ | Gauge | B/s | instance | Pollster | Libvirt | Total system ba\ | | andwidth\ | | | ID | | | ndwidth from on\ | | .total | | | | | | e level of cache | +-----------+-------+------+----------+----------+---------+------------------+ | memory.b\ | Gauge | B/s | instance | Pollster | Libvirt | Bandwidth of me\ | | andwidth\ | | | ID | | | mory traffic fo\ | | .local | | | | | | r a memory cont\ | | | | | | | | roller | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cpu\ | Gauge | cyc\ | instance | Pollster | Libvirt | the number of c\ | | .cycles | | le | ID | | | pu cycles one i\ | | | | | | | | nstruction needs | +-----------+-------+------+----------+----------+---------+------------------+ | perf.ins\ | Gauge | inst\| instance | Pollster | Libvirt | the count of in\ | | tructions | | ruct\| ID | | | structions | | | | ion | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cac\ | Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | | he.refer\ | | nt | ID | | | che hits | | ences | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cac\ | Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | | he.misses | | nt | ID | | | che misses | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Ocata release** | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | 
Number of | | incoming\ | ative | et | ID | | | incoming dropped | | .packets\ | | | | | | packets | | .drop | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | outgoing\ | ative | et | ID | | | outgoing dropped | | .packets\ | | | | | | packets | | .drop | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | incoming\ | ative | et | ID | | | incoming error | | .packets\ | | | | | | packets | | .error | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | outgoing\ | ative | et | ID | | | outgoing error | | .packets\ | | | | | | packets | | .error | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Pike release** | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | | | ative | MB | instance | Pollster | Libvirt | Memory swap in | | swap.in | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | | | ative | MB | instance | Pollster | Libvirt | Memory swap out | | swap.out | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ .. note:: To enable the libvirt ``memory.usage`` support, you need to install libvirt version 1.1.1+, QEMU version 1.5+, and you also need to prepare suitable balloon driver in the image. It is applicable particularly for Windows guests, most modern Linux distributions already have it built in. Telemetry is not able to fetch the ``memory.usage`` samples without the image balloon driver. .. note:: To enable libvirt ``disk.*`` support when running on RBD-backed shared storage, you need to install libvirt version 1.2.16+. The Telemetry service supports creating new meters by using transformers. For more details about transformers see :ref:`telemetry-transformers`. Among the meters gathered from libvirt and Hyper-V, there are a few which are derived from other meters. The list of meters that are created by using the ``rate_of_change`` transformer from the above table is the following: - cpu_util - cpu.delta - disk.read.requests.rate - disk.write.requests.rate - disk.read.bytes.rate - disk.write.bytes.rate - disk.device.read.requests.rate - disk.device.write.requests.rate - disk.device.read.bytes.rate - disk.device.write.bytes.rate - network.incoming.bytes.rate - network.outgoing.bytes.rate - network.incoming.packets.rate - network.outgoing.packets.rate .. note:: If storing data in Gnocchi v4.1+, derived rate_of_change metrics can be computed using Gnocchi rather than Ceilometer transformers. This will minimize Ceilometer memory requirements and avoid missing data when Ceilometer services restart. OpenStack Compute is capable of collecting ``CPU`` related meters from the compute host machines. In order to use that you need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the ``nova.conf`` configuration file. For further information see the Compute configuration section in the `Compute chapter `__ of the OpenStack Configuration Reference. 
The following host machine related meters are collected for OpenStack Compute: +---------------------+-------+------+----------+-------------+---------------+ | Name | Type | Unit | Resource | Origin | Note | +=====================+=======+======+==========+=============+===============+ | **Meters added in the Mitaka release or earlier** | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | MHz | host ID | Notification| CPU frequency | | frequency | | | | | | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU kernel | | kernel.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU idle time | | idle.time | lative| | | | | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU user mode | | user.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU I/O wait | | iowait.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU kernel | | kernel.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU idle | | idle.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU user mode | | user.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU I/O wait | | iowait.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU | | percent | | | | | utilization | +---------------------+-------+------+----------+-------------+---------------+ .. _telemetry-bare-metal-service: Bare metal service ~~~~~~~~~~~~~~~~~~ Telemetry captures notifications that are emitted by the Bare metal service. The source of the notifications are IPMI sensors that collect data from the host machine. .. note:: The sensor data is not available in the Bare metal service by default. To enable the meters and configure this module to emit notifications about the measured values see the `Installation Guide `__ for the Bare metal service. 
The following meters are recorded for the Bare metal service: +------------------+-------+------+----------+-------------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +==================+=======+======+==========+=============+==================+ | **Meters added in the Mitaka release or earlier** | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi.fan| Gauge | RPM | fan | Notification| Fan rounds per | | | | | sensor | | minute (RPM) | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | C | temper\ | Notification| Temperature read\| | .temperature | | | ature | | ing from sensor | | | | | sensor | | | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | W | current | Notification| Current reading | | .current | | | sensor | | from sensor | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | V | voltage | Notification| Voltage reading | | .voltage | | | sensor | | from sensor | +------------------+-------+------+----------+-------------+------------------+ IPMI based meters ~~~~~~~~~~~~~~~~~ Another way of gathering IPMI based data is to use IPMI sensors independently from the Bare metal service's components. The same meters as :ref:`telemetry-bare-metal-service` can be fetched except that origin is ``Pollster`` instead of ``Notification``. You need to deploy the ceilometer-agent-ipmi on each IPMI-capable node in order to poll local sensor data. For further information about the IPMI agent see :ref:`telemetry-ipmi-agent`. .. warning:: To avoid duplication of metering data and unnecessary load on the IPMI interface, do not deploy the IPMI agent on nodes that are managed by the Bare metal service and keep the ``conductor.send_sensor_data`` option set to ``False`` in the ``ironic.conf`` configuration file. 
Besides generic IPMI sensor data, the following Intel Node Manager meters are recorded from capable platform: +---------------------+-------+------+----------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +=====================+=======+======+==========+==========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | W | host ID | Pollster | Current power | | .power | | | | | of the system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Current tempera\ | | .temperature | | | | | ture of the | | | | | | | system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Inlet temperatu\ | | .inlet_temperature | | | | | re of the system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Outlet temperat\ | | .outlet_temperature | | | | | ure of the system| +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | CFM | host ID | Pollster | Volumetric airf\ | | .airflow | | | | | low of the syst\ | | | | | | | em, expressed as | | | | | | | 1/10th of CFM | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | CUPS | host ID | Pollster | CUPS(Compute Us\ | | .cups | | | | | age Per Second) | | | | | | | index data of the| | | | | | | system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | % | host ID | Pollster | CPU CUPS utiliz\ | | .cpu_util | | | | | ation of the | | | | | | | system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | % | host ID | Pollster | Memory CUPS | | .mem_util | | | | | utilization of | | | | | | | the system | +---------------------+-------+------+----------+----------+------------------+ | hardware.ipmi.node\ | Gauge | % | host ID | Pollster | IO CUPS | | .io_util | | | | | utilization of | | | | | | | the system | +---------------------+-------+------+----------+----------+------------------+ SNMP based meters ~~~~~~~~~~~~~~~~~ Telemetry supports gathering SNMP based generic host meters. In order to be able to collect this data you need to run snmpd on each target host. 
The following meters are available about the host machines by using SNMP: +---------------------+-------+------+----------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +=====================+=======+======+==========+==========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------------+-------+------+----------+----------+------------------+ | hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | | 1min | | ess | | | past 1 minute | +---------------------+-------+------+----------+----------+------------------+ | hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | | 5min | | ess | | | past 5 minutes | +---------------------+-------+------+----------+----------+------------------+ | hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | | 15min | | ess | | | past 15 minutes | +---------------------+-------+------+----------+----------+------------------+ | hardware.cpu.util | Gauge | % | host ID | Pollster | cpu usage | | | | | | | percentage | +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Total disk size | | .total | | | | | | +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Used disk size | | .used | | | | | | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.to\ | Gauge | KB | host ID | Pollster | Total physical | | tal | | | | | memory size | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.us\ | Gauge | KB | host ID | Pollster | Used physical m\ | | ed | | | | | emory size | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.bu\ | Gauge | KB | host ID | Pollster | Physical memory | | ffer | | | | | buffer size | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.ca\ | Gauge | KB | host ID | Pollster | Cached physical | | ched | | | | | memory size | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Total swap space | | ap.total | | | | | size | +---------------------+-------+------+----------+----------+------------------+ | hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Available swap | | ap.avail | | | | | space size | +---------------------+-------+------+----------+----------+------------------+ | hardware.network.i\ | Cumul\| B | interface| Pollster | Bytes received | | ncoming.bytes | ative | | ID | | by network inte\ | | | | | | | rface | +---------------------+-------+------+----------+----------+------------------+ | hardware.network.o\ | Cumul\| B | interface| Pollster | Bytes sent by n\ | | utgoing.bytes | ative | | ID | | etwork interface | +---------------------+-------+------+----------+----------+------------------+ | hardware.network.o\ | Cumul\| pack\| interface| Pollster | Sending error o\ | | utgoing.errors | ative | et | ID | | f network inter\ | | | | | | | face | +---------------------+-------+------+----------+----------+------------------+ | hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of recei\ | | p.incoming.datagra\ | ative | grams| | | ved datagrams | | ms | | | | | | 
+---------------------+-------+------+----------+----------+------------------+ | hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of sent | | p.outgoing.datagra\ | ative | grams| | | datagrams | | ms | | | | | | +---------------------+-------+------+----------+----------+------------------+ | hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ | | ats.io.incoming.bl\ | ative | ks | | | er of blocks re\ | | ocks | | | | | ceived to block | | | | | | | device | +---------------------+-------+------+----------+----------+------------------+ | hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ | | ats.io.outgoing.bl\ | ative | ks | | | er of blocks se\ | | ocks | | | | | nt to block dev\ | | | | | | | ice | +---------------------+-------+------+----------+----------+------------------+ | hardware.system_st\ | Gauge | % | host ID | Pollster | CPU idle percen\ | | ats.cpu.idle | | | | | tage | +---------------------+-------+------+----------+----------+------------------+ | **Meters added in the Queens release** | +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.read.\| Gauge | B | disk ID | Pollster | Bytes read from | | bytes | | | | | device since boot| +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.write\| Gauge | B | disk ID | Pollster | Bytes written to | | .bytes | | | | | device since boot| +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.read.\| Gauge | requ\| disk ID | Pollster | Read requests to | | requests | | ests | | | device since boot| +---------------------+-------+------+----------+----------+------------------+ | hardware.disk.write\| Gauge | requ\| disk ID | Pollster | Write requests to| | .requests | | ests | | | device since boot| +---------------------+-------+------+----------+----------+------------------+ OpenStack Image service ~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Image service: +--------------------+--------+------+----------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+========+======+==========+==========+==================+ | **Meters added in the Mitaka release or earlier** | +--------------------+--------+------+----------+----------+------------------+ | image.size | Gauge | B | image ID | Notifica\| Size of the upl\ | | | | | | tion, Po\| oaded image | | | | | | llster | | +--------------------+--------+------+----------+----------+------------------+ | image.download | Delta | B | image ID | Notifica\| Image is downlo\ | | | | | | tion | aded | +--------------------+--------+------+----------+----------+------------------+ | image.serve | Delta | B | image ID | Notifica\| Image is served | | | | | | tion | out | +--------------------+--------+------+----------+----------+------------------+ OpenStack Block Storage ~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Block Storage: +--------------------+-------+--------+----------+----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+========+==========+==========+=================+ | **Meters added in the Mitaka release or earlier** | +--------------------+-------+--------+----------+----------+-----------------+ | volume.size | Gauge | GB | volume ID| Notifica\| Size of the vol\| | | | | | tion | ume | 
+--------------------+-------+--------+----------+----------+-----------------+ | snapshot.size | Gauge | GB | snapshot | Notifica\| Size of the sna\| | | | | ID | tion | pshot | +--------------------+-------+--------+----------+----------+-----------------+ | **Meters added in the Queens release** | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Total volume | | pacity.total | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Free volume | | pacity.free | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Assigned volume | | pacity.allocated | | | | tion | capacity on host| | | | | | | by Cinder | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Assigned volume | | pacity.provisioned | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Virtual free | | pacity.virtual_free| | | | tion | volume capacity | | | | | | | on host | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Total volume | | ol.capacity.total | | | #pool | tion | capacity in pool| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Free volume | | ol.capacity.free | | | #pool | tion | capacity in pool| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Assigned volume | | ol.capacity.alloca\| | | #pool | tion | capacity in pool| | ted | | | | | by Cinder | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Assigned volume | | ol.capacity.provis\| | | #pool | tion | capacity in pool| | ioned | | | | | | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Virtual free | | ol.capacity.virtua\| | | #pool | tion | volume capacity | | l_free | | | | | in pool | +--------------------+-------+--------+----------+----------+-----------------+ OpenStack File Share ~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack File Share: +--------------------+-------+--------+----------+----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+========+==========+==========+=================+ | **Meters added in the Pike release** | +--------------------+-------+--------+----------+----------+-----------------+ | manila.share.size | Gauge | GB | share ID | Notifica\| Size of the fil\| | | | | | tion | e share | +--------------------+-------+--------+----------+----------+-----------------+ .. 
_telemetry-object-storage-meter: OpenStack Object Storage ~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Object Storage: +--------------------+-------+-------+------------+---------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+=======+============+=========+=================+ | **Meters added in the Mitaka release or earlier** | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects | Gauge | object| storage ID | Pollster| Number of objec\| | | | | | | ts | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.si\| Gauge | B | storage ID | Pollster| Total size of s\| | ze | | | | | tored objects | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.co\| Gauge | conta\| storage ID | Pollster| Number of conta\| | ntainers | | iner | | | iners | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.in\| Delta | B | storage ID | Notific\| Number of incom\| | coming.bytes | | | | ation | ing bytes | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.ou\| Delta | B | storage ID | Notific\| Number of outgo\| | tgoing.bytes | | | | ation | ing bytes | +--------------------+-------+-------+------------+---------+-----------------+ | storage.api.request| Delta | requ\ | storage ID | Notific\| Number of API r\| | | | est | | ation | equests against | | | | | | | OpenStack Obje\ | | | | | | | ct Storage | +--------------------+-------+-------+------------+---------+-----------------+ | storage.containers\| Gauge | object| storage ID\| Pollster| Number of objec\| | .objects | | | /container | | ts in container | +--------------------+-------+-------+------------+---------+-----------------+ | storage.containers\| Gauge | B | storage ID\| Pollster| Total size of s\| | .objects.size | | | /container | | tored objects i\| | | | | | | n container | +--------------------+-------+-------+------------+---------+-----------------+ Ceph Object Storage ~~~~~~~~~~~~~~~~~~~ In order to gather meters from Ceph, you have to install and configure the Ceph Object Gateway (radosgw) as it is described in the `Installation Manual `__. You also have to enable `usage logging `__ in order to get the related meters from Ceph. You will need an ``admin`` user with ``users``, ``buckets``, ``metadata`` and ``usage`` ``caps`` configured. In order to access Ceph from Telemetry, you need to specify a ``service group`` for ``radosgw`` in the ``ceilometer.conf`` configuration file along with ``access_key`` and ``secret_key`` of the ``admin`` user mentioned above. 
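To make the preceding paragraph concrete, the two snippets below sketch the setup it describes. They are illustrative only: the ``admin`` uid is just an example, and the option names should be verified against the configuration reference for your release.

.. code-block:: console

   # grant the radosgw admin user the caps Telemetry needs (uid is an example)
   $ radosgw-admin caps add --uid=admin \
         --caps="usage=read,write;buckets=read;users=read;metadata=read"

The credentials of that user then go into ``ceilometer.conf`` so the polling agent can reach the Ceph Object Gateway:

.. code-block:: ini

   [service_types]
   # assumed service type under which radosgw is registered in the catalog
   radosgw = object-store

   [rgw_admin_credentials]
   access_key = <ADMIN_ACCESS_KEY>
   secret_key = <ADMIN_SECRET_KEY>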
The following meters are collected for Ceph Object Storage: +------------------+------+--------+------------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +==================+======+========+============+==========+==================+ | **Meters added in the Mitaka release or earlier** | +------------------+------+--------+------------+----------+------------------+ | radosgw.objects | Gauge| object | storage ID | Pollster | Number of objects| +------------------+------+--------+------------+----------+------------------+ | radosgw.objects.\| Gauge| B | storage ID | Pollster | Total size of s\ | | size | | | | | tored objects | +------------------+------+--------+------------+----------+------------------+ | radosgw.objects.\| Gauge| contai\| storage ID | Pollster | Number of conta\ | | containers | | ner | | | iners | +------------------+------+--------+------------+----------+------------------+ | radosgw.api.requ\| Gauge| request| storage ID | Pollster | Number of API r\ | | est | | | | | equests against | | | | | | | Ceph Object Ga\ | | | | | | | teway (radosgw) | +------------------+------+--------+------------+----------+------------------+ | radosgw.containe\| Gauge| object | storage ID\| Pollster | Number of objec\ | | rs.objects | | | /container | | ts in container | +------------------+------+--------+------------+----------+------------------+ | radosgw.containe\| Gauge| B | storage ID\| Pollster | Total size of s\ | | rs.objects.size | | | /container | | tored objects in | | | | | | | container | +------------------+------+--------+------------+----------+------------------+ .. note:: The ``usage`` related information may not be updated right after an upload or download, because the Ceph Object Gateway needs time to update the usage properties. For instance, the default configuration needs approximately 30 minutes to generate the usage logs. 
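As a reference for the note above, usage logging is enabled in ``ceph.conf`` on the gateway node. The options shown are standard Ceph settings and the values are the Ceph defaults; the client section name is only an example:

.. code-block:: ini

   [client.rgw.gateway-1]
   # record per-user/per-bucket usage so the radosgw.* meters have data
   rgw enable usage log = true
   rgw usage log tick interval = 30
   rgw usage log flush threshold = 1024

Lowering the tick interval and flush threshold makes usage data appear sooner, at the cost of more frequent writes by the gateway.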
OpenStack Identity ~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Identity: +-------------------+------+--------+-----------+-----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +===================+======+========+===========+===========+=================+ | **Meters added in the Mitaka release or earlier** | +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User successful\| | icate.success | | | | tion | ly authenticated| +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User pending au\| | icate.pending | | | | tion | thentication | +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User failed to | | icate.failure | | | | tion | authenticate | +-------------------+------+--------+-----------+-----------+-----------------+ OpenStack Networking ~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Networking: +-----------------+-------+--------+-----------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +=================+=======+========+===========+===========+==================+ | **Meters added in the Mitaka release or earlier** | +-----------------+-------+--------+-----------+-----------+------------------+ | bandwidth | Delta | B | label ID | Notifica\ | Bytes through t\ | | | | | | tion | his l3 metering | | | | | | | label | +-----------------+-------+--------+-----------+-----------+------------------+ SDN controllers ~~~~~~~~~~~~~~~ The following meters are collected for SDN: +-----------------+---------+--------+-----------+----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +=================+=========+========+===========+==========+=================+ | **Meters added in the Mitaka release or earlier** | +-----------------+---------+--------+-----------+----------+-----------------+ | switch | Gauge | switch | switch ID | Pollster | Existence of sw\| | | | | | | itch | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port | Gauge | port | switch ID | Pollster | Existence of po\| | | | | | | rt | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Packets receive\| | ceive.packets | tive | | | | d on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Packets transmi\| | ansmit.packets | tive | | | | tted on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | B | switch ID | Pollster | Bytes received | | ceive.bytes | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.tr\ | Cumula\ | B | switch ID | Pollster | Bytes transmitt\| | ansmit.bytes | tive | | | | ed on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Drops received | | ceive.drops | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Drops transmitt\| | ansmit.drops | 
tive | | | | ed on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Errors received | | ceive.errors | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Errors transmit\| | ansmit.errors | tive | | | | ted on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Frame alignment | | ceive.frame\_er\| tive | | | | errors receive\ | | ror | | | | | d on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Overrun errors | | ceive.overrun\_\| tive | | | | received on port| | error | | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.re\ | Cumula\ | packet | switch ID | Pollster | CRC errors rece\| | ceive.crc\_error| tive | | | | ived on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.co\ | Cumula\ | count | switch ID | Pollster | Collisions on p\| | llision.count | tive | | | | ort | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.table | Gauge | table | switch ID | Pollster | Duration of tab\| | | | | | | le | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.table.a\ | Gauge | entry | switch ID | Pollster | Active entries | | ctive.entries | | | | | in table | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.table.l\ | Gauge | packet | switch ID | Pollster | Lookup packets | | ookup.packets | | | | | for table | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.table.m\ | Gauge | packet | switch ID | Pollster | Packets matches | | atched.packets | | | | | for table | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.flow | Gauge | flow | switch ID | Pollster | Duration of flow| +-----------------+---------+--------+-----------+----------+-----------------+ | switch.flow.du\ | Gauge | s | switch ID | Pollster | Duration of flow| | ration.seconds | | | | | in seconds | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.flow.du\ | Gauge | ns | switch ID | Pollster | Duration of flow| | ration.nanosec\ | | | | | in nanoseconds | | onds | | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.flow.pa\ | Cumula\ | packet | switch ID | Pollster | Packets received| | ckets | tive | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.flow.by\ | Cumula\ | B | switch ID | Pollster | Bytes received | | tes | tive | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ | **Meters added in the Pike release** | +-----------------+---------+--------+-----------+----------+-----------------+ | port | Gauge | port | port ID | Pollster | Existence of po\| | | | | | | rt | +-----------------+---------+--------+-----------+----------+-----------------+ | port.uptime | Gauge | s | port ID | Pollster | Uptime of port | | | | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ | port.receive.pa\| Cumula\ | packet | port ID | 
Pollster | Packets receive\| | ckets | tive | | | | d on port | +-----------------+---------+--------+-----------+----------+-----------------+ | port.transmit.\ | Cumula\ | packet | port ID | Pollster | Packets transmi\| | packets | tive | | | | tted on port | +-----------------+---------+--------+-----------+----------+-----------------+ | port.receive.\ | Cumula\ | B | port ID | Pollster | Bytes received | | bytes | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | port.transmit.\ | Cumula\ | B | port ID | Pollster | Bytes transmitt\| | bytes | tive | | | | ed on port | +-----------------+---------+--------+-----------+----------+-----------------+ | port.receive.\ | Cumula\ | packet | port ID | Pollster | Drops received | | drops | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | port.receive.\ | Cumula\ | packet | port ID | Pollster | Errors received | | errors | tive | | | | on port | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.ports | Gauge | ports | switch ID | Pollster | Number of ports\| | | | | | | on switch | +-----------------+---------+--------+-----------+----------+-----------------+ | switch.port.upt\| Gauge | s | switch ID | Pollster | Uptime of switch| | ime | | | | | | +-----------------+---------+--------+-----------+----------+-----------------+ These meters are available for OpenFlow-based switches. To enable them, each driver needs to be properly configured. Load-Balancer-as-a-Service (LBaaS v1) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for LBaaS v1: +---------------+---------+---------+-----------+-----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=========+=========+===========+===========+=================+ | **Meters added in the Mitaka release or earlier** | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | pool | pool ID | Pollster | Existence of a | | ices.lb.pool | | | | | LB pool | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | vip | vip ID | Pollster | Existence of a | | ices.lb.vip | | | | | LB VIP | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | member | member ID | Pollster | Existence of a | | ices.lb.memb\ | | | | | LB member | | er | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | health\ | monitor ID| Pollster | Existence of a | | ices.lb.heal\ | | _monit\ | | | LB health probe | | th_monitor | | or | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Cumula\ | connec\ | pool ID | Pollster | Total connectio\| | ices.lb.tota\ | tive | tion | | | ns on a LB | | l.connections | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | connec\ | pool ID | Pollster | Active connecti\| | ices.lb.acti\ | | tion | | | ons on a LB | | ve.connections| | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | B | pool ID | Pollster | Number of incom\| | ices.lb.inco\ | | | | | ing Bytes | | ming.bytes | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ |
Gauge | B | pool ID | Pollster | Number of outgo\| | ices.lb.outg\ | | | | | ing Bytes | | oing.bytes | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ Load-Balancer-as-a-Service (LBaaS v2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for LBaaS v2. +---------------+---------+---------+-----------+-----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=========+=========+===========+===========+=================+ | **Meters added in the Mitaka release or earlier** | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | pool | pool ID | Pollster | Existence of a | | ices.lb.pool | | | | | LB pool | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | listen\ | listener | Pollster | Existence of a | | ices.lb.list\ | | er | ID | | LB listener | | ener | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | member | member ID | Pollster | Existence of a | | ices.lb.memb\ | | | | | LB member | | er | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | health\ | monitor ID| Pollster | Existence of a | | ices.lb.heal\ | | _monit\ | | | LB health probe | | th_monitor | | or | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | loadba\ | loadbala\ | Pollster | Existence of a | | ices.lb.load\ | | lancer | ncer ID | | LB loadbalancer | | balancer | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Cumula\ | connec\ | pool ID | Pollster | Total connectio\| | ices.lb.tota\ | tive | tion | | | ns on a LB | | l.connections | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | connec\ | pool ID | Pollster | Active connecti\| | ices.lb.acti\ | | tion | | | ons on a LB | | ve.connections| | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | B | pool ID | Pollster | Number of incom\| | ices.lb.inco\ | | | | | ing Bytes | | ming.bytes | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ | network.serv\ | Gauge | B | pool ID | Pollster | Number of outgo\| | ices.lb.outg\ | | | | | ing Bytes | | oing.bytes | | | | | | +---------------+---------+---------+-----------+-----------+-----------------+ .. note:: The above meters are experimental and may generate a large load against the Neutron APIs. The future enhancement will be implemented when Neutron supports the new APIs. 
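Because these pollsters query the Neutron API on every polling cycle, a common way to limit the load is to move them into their own polling source with a longer interval. A minimal ``polling.yaml`` sketch follows; the source name and the 30-minute interval are arbitrary examples:

.. code-block:: yaml

   ---
   sources:
       - name: lbaas_pollsters
         # poll the LBaaS meters every 30 minutes instead of the default cadence
         interval: 1800
         meters:
             - network.services.lb.pool
             - network.services.lb.listener
             - network.services.lb.member
             - network.services.lb.health_monitor
             - network.services.lb.loadbalancer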
VPN-as-a-Service (VPNaaS) ~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for VPNaaS: +---------------+-------+---------+------------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=======+=========+============+===========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | vpnser\ | vpn ID | Pollster | Existence of a | | ices.vpn | | vice | | | VPN | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | ipsec\_\| connection | Pollster | Existence of an | | ices.vpn.con\ | | site\_c\| ID | | IPSec connection | | nections | | onnect\ | | | | | | | ion | | | | +---------------+-------+---------+------------+-----------+------------------+ Firewall-as-a-Service (FWaaS) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for FWaaS: +---------------+-------+---------+------------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=======+=========+============+===========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | firewall| firewall ID| Pollster | Existence of a | | ices.firewall | | | | | firewall | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | firewa\ | firewall ID| Pollster | Existence of a | | ices.firewal\ | | ll_pol\ | | | firewall policy | | l.policy | | icy | | | | +---------------+-------+---------+------------+-----------+------------------+ ceilometer-10.0.0/doc/source/index.rst0000666000175100017510000000305313236733243017663 0ustar zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== Welcome to Ceilometer's documentation! ====================================== The :term:`Ceilometer` project is a data collection service that provides the ability to normalise and transform data across all current OpenStack core components with work underway to support future OpenStack components. Ceilometer is a component of the Telemetry project. Its data can be used to provide customer billing, resource tracking, and alarming capabilities across all OpenStack core components. This documentation offers information on how Ceilometer works and how to contribute to the project. Overview ======== .. toctree:: :maxdepth: 2 install/index contributor/index admin/index configuration/index Appendix ======== .. toctree:: :maxdepth: 1 releasenotes/index glossary .. 
update index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ceilometer-10.0.0/doc/source/contributor/2-accessmodel.png [binary PNG image data omitted]
xO™‚Eěl‹­{đäI•ďy4ގńľĄ§AĂŔęş:Č~ý¦ŇĺčXX†Ř_ţ˙íVV˙<`nxýLvë‘×E­JtĚĚĘ,##1Qęßů"Ń˙ŻK*c]Ń˙5uŽU-ďu‰V¨oŁgmóÖ­đ<ü2Jü?š•’‚¸«W`Ó©ômmń&:Zţ‚ ťšŠ”‡8zQ˙2'"˘÷<̨ l]±ťFŹĆŠŤaoc??`늒‡Ç·ů·( =`€ĚőýúaĄź.\»&sý·|ł1}äČRËš5lXřá$- pbÓ¦2_/Ţ6­śQçßćBŃ8źôęUj]«¦M‘xá‚Ô˛KE‚}6x°ĚňFą»cóţý˝u«JçÝÄŢw>ĬիńÍÜą°45•lŰ®eKDľ:x¸K÷3n`m-ůyXź>2ץ–¨÷ŞŢ/ňÜ3µ!((ʆhPÝž 6r@|9ďQo#ÔŇ’k;őJŚÉ ¦© evĄ/o's›¬ěJ­«És¬jyň ŘZ\Ł~ý–.m%3«ČÜ®?\߲µĚőňÎ>CDDôކ•ťšŐĆÂ?y{cÜ×_cäüů€ßV®„U9ßŘ”hZĆéؓ2ľiô–ÎěJOY¦Uô!¬řŕ’ÉŻ^aËţýř÷ęUÄ$$HĆ©ČÍ«ú(áϊαŤŤ|uRÔě¶x+‘âš-Ź‰ŹŻŇyoZ¶ <=ńW@ü‚ŕҢş·k·víŕęě 5µę˝em‹ľÓPW—ş—d­+9hUďyî™ÚpýúuľC*9}cT®¤˙î¨IcŘuëZĄ0C”™ u]]ě89EátuefA]WjZZÉč%ÔÖ’CMŞîs¬É:+E (5 iYöéżţ†‚˘îşDDD 3Ţ!÷®]ajd„¤”ˇOçÎ Räü@”ZFË#r_§+Ç·6OccŃ{ňdăËhRÚĺ|“WŢşęĽ_äągj“Žž!·ěŔwK%clf ˝zĆrmË@ăýő$8MFŁ~ýpďŔ¤ĆĆ•»˝YË–h?wţŰ·_j¬‡ÔŘX7m };[$˙W˝Óž§'ÄC]·ęŮŰ#IĆŔ×őí ·+'LŻŐ}Ž5Yg%Yµk33Ä„„⌗W™Űą­ZŰ.®°jß±ˇ—ů BDD 3޵7")%†úúHJIÁŠŤńÍĽyrż^WK ŻÓŇsć ęééUx˙‚ýl+cÉúőxž”;KK,ź1[·†‘Ô…B¨©ŞBż}űŞ= ji!5=)Ż_ĂÔČčíŰkjâMz:Ň33%cYČz ×•łŮlyĚML°röl¬ś=‘QQ p0.\»†™«V!''SЦz•GN±ľŃ5ˇŞ÷KuÝ35v~úFhëęŞŰhĽź’nßAüµk°hÓn+W!hádľH‘ą­QăĆčş|´ML`Ü´©T~ ĆM›˘ůś[ľĽôĂtűöh7k&žťý7Š .-Ź„›·`ب¸Ë 3Šf1I¸u»F몪ç(PUEA±–•5YgeŐ‘x°Ô˛< €mW4ęןa)<•şvB!7n`ĂźÂŇÔçwí‚™±16îÝ+óA ‹šü?|ö¬ÖÎC<˙Ď?cDż~°ł´„®¶6Ô…BÄTqjR¨_4 Ř3l”Ĺ®hű{ŹË\żhą]%w+÷ť¦zxŕÔ–-Xďí řeĎž2Ă€’ÓÉ(5‹HuS„ű…¨ş Îrňţąôí·Č|ńő6Ŕ mŰĐbÔHčŰŮAUCúú0iÖ ífÍDßź×CŰÄ/""p}ëé÷Ů#G›•…únÝŃeń"čŰŘ@EM ZĆFhňńGč¶l)ômm!¬D8qř0ňsóŕŕîçĎ'CĎÚ ŞĐ·±Ó¤IppwG~n"Ž©ŃzŞě9ćµX¬ďÖ*jjĐ44¬ń:+NC_6ť;C”žŽč‹ËÝ6ćR˛SSaëÚęúúüĺ ""†ďJFV¦űř ???~ý5ě­­±vÁ©éYeÉÍÍ•úwŹ˘Vëwí’ą}ŕĄKp2>7ÖŘądç䬊 €)¶fëVÉĂ{e[¸:N ·çXéąŕoGDŔÄŐ='L(µýöC‡d–·«čCdg'§*ť÷ÄĹ‹áĐŻźĚ™ZÄrĆÉčzŁWÔ…ĺćĄÖ­ŰąłFď;E¸_hPeĄ'$âÔĚYxńŕ4 ĐfęT|ĽsFź>…GŁżßF8 5MMD_¸€€Ů_”43->—ľůůąąhĐ«>ţcĆbŘ_ˇĂÜąęčŕEDnn«x ×Ďž!ÜŻđýłĺ1Ľ{7Fź>…Ź˙؅ƶ( ߸Żk8¸®ě9ľ¸_ř˙R×%K0&(Ăý˙®ń:+®Aď^PUWÇł˙E^vůťć‹D:{*B!öúżDDÄ0ă]YúóĎŚŠÂĐ>}0 {wŔ޽ѿkW<ŠŽĆŇź–Ú^<&‚p0rD"$ľx4t(´55áډ‹#2* 9"â““±őŕAŚóňBdT”dŽšĐ¸~}Ŕ˛ ü겲łvçF/\Ô´4É̇˙ůG|T(42***Řuô(ľŰ¶ I/_"#+ çĂĂ1aŃ"degŁC«V’í'5UUě:rË7lŔădfe!2* >7bÇáĂPSUĹä˘iN+«  ńÉɰhN]¸€W©©ČÍÍĹł¸8ÉőkáŕPęuâe^ëÖáÎÇČÎÉAlBć~ű-®ÜşĂü†Iî"TĄőçĎqbę4ś[ľĎΞEZ|ŹNťBzBňE"äffâĹ¸ć· §fĚDNZĺîŁűýŤŔąó‚ěׯ‘ꛇ¬—/}áN1÷˙ö'őT™s ýá{$Ţş…ܬ,䤥KŤŹQ“u&Ö¨h¶¬'Arm˙čta÷ˇFýűóš  äÔ E¦Nť čÓ¦ úą¸ĽłŇ­ÄľŇÂÂp><îÓ¦ÁŘŔW€iQ3N p† —#ž™‰›6ˇk۶€ţS§â|ŃĄĹË€ż1iÉJ´ÚsrtÄńM›`Plüń±—5KEÖď;y“–,)µŤŤą9‚·mĂŇ_~Áľ“'K÷ŰöQÜŹ;wbńúő2×96h€€_•Ňoď^,\»VöŤ$ŕ» 0ÍĂŁJçý,.năÇ#)EvźmM řŻ_/ą†bOźĆřE‹JmŻ.âŻÄ4Ä&$ őęU‚rŹ©2ë*sżTôzU§Saa(šą`óćÍ2·ůî»ď sëF8ę ľ[ľ‡ţ=ń"˙»°¶¶.w ńý˘ýć š<ŚdĺŐ!ŤˇŻ,\¸BD ˇN´ĚHĎĚÄtŕŰůóĄ‚  pŠÍĺ3f   Ó}|^4XĺŹ^^puv†¶¦&ôutŕҢ…ä5Cz÷ĆĹ?ţŔakauˇ:ZZpnÖ ľ_|ŕmŰJ=V'Źţý±váB8ŘŮA¨¦ssŚýč#ýö¬ÍÍá=e śˇ.VzZĎ9ăĆÁýz|ر# őő!TSCkkĚ7˙üţ{©6¦Ź‰ă~~čץ LęŐšŞ*LŤŚ0ČÍ '7o–dTT}++\Ú˝ó>ű Ž @OGęB!ě,-1fŕ@śßąłTĂúöĹ–+đA“&ĐÔĐ@===tsqÁ‘ ĐŁCčµÂÉ|KŰĘŞíű…¨&°…)*…k™AD5Ź-3¨"äiˇÁ–DDu[f‘"RaQyŘB Ă ""z+DDDD¤Hf‘\h‘˘`ADDr+hlßľť•BDDDDďĂ ""Ş´ŚŚ V˝s 3Hn%g6ńôôdĄŃ;Ç0ä"Ď­DDDDDďĂ ""z+DDDD¤HfQąd‘˘aADDebADDDDŠaIŮ´ot]\XDÄ Ă ’rýŢ=V1Č """"…Ć0¤0Ě "DDDD¤čfԲ˷naś— €Q§N°vsC‡‘#á»y3’^ľ$żzăÎťˇëâ‚›÷ď—YÖ§OˇëâăÎťńâŐ+ąĘű~űv躸ŕżGŹş..ĐuqÁÁÓ§%Ű\ą}cżú Ťúö…aÇŽhЧ<ćÍĂůđp™Çó&- ş..p:°÷Ä ´÷đ€©«+Ú Š]GŽH¶ýÝß.#FŔ¸sg44ßţö x1Č """"*EŤUP{ă3ooäĺĺI–ĺDx‰»‘‘Ř~čÎnßkss|Üł'öź:…GŽŕGG™ĺí9v đIŻ^8.wŮňřăčQĚXµJŞĽ¤”?w'ÎźÇÚ 0ŐĂCę5š€Ě¬,ü€ÉK—JÖE<{†é>>°45ĹÝČH,úé'Éş¨çϱŇφúú2|8o"DDDDD 3ĹJ??äĺĺaŢgźaŇС°45Efv6.^»†ß}‡¨çĎáăç‡ÍË—câ!Řęöź<‰Ő_|! 
Äňóó±÷äIŔ¤!C0ł(x§l?~<ćŹ/ü3-,LRöă|±f `Á„ űŃG°63C|r2ś>ŤŐ[¶ŕ«~@ŹĐÄŢ^ň:ˇZáí•–‘??üäĺ…QŕĹ«Włf N_Ľ˙ýö"ž=öU«0ČÍ ÉĹÖýyü8Ă “ť‰¸¨‡¬%٧o˝zĆ 2aUŻ'±±’€@_W .½[7aČěŮONtiÓŽ ŕţ“'8rć Fôë'UÖ™+W›€čääTˇ˛ßfËţýČÎÉÁ2OO,ś8Q˛ÜŢÚ 'ND~AVúůáw¬™;W˛^ ^Ą¦bĘđáTÔÝDŰÂľsćŕôĹ‹¸t㼧L‘śŹm±už<áM˘^&ÇáäţźYJhÄäer 2HŮpĚŚZ$nĹ0kőj†ôę…=ÇŹcב#Xęé ř;(™ŮŮ8d¤KIEË.OZf¦\ەגB[SłRë¨vŮŮٱę(DDDD¤ĚfÔ2s¬ś=+gĎFdT‚CBŕŚ ×®aćŞUČÉÉÁ”#$ŰO2{ŽÇîăDZdút) µď IDATě>z@áŔźU)»,şZZxť–†3gPOOŹŤHÉ1Č """"eÇ13ť¦zxŕÔ–-Xďí řeĎ©m:¶nŤMHŔ…k×p˙Ʉܼ —-ĐşŚ)[ĺ-», ‹ĆNxřě/‘’cADDDDuĂŚZ4qńb8ô뇫wî”Z'3®ÄŕťŔ˙·Ŕ8xú4v :yذj)[,77WňsŹöíëwí’ąmŕĄKp2>7ň˘)0DDDDTW0̨EONĆ„E‹pęÂĽJMEnn.žĹĹaéĎ…Sa¶pp(ő:wwhkjâPp0ţó7^&Ç`ADDDDu [f(ˇÜÜ\¬ÚĽđůđá¬"’‰AŐUl™ˇDrD"†DDDD¤°Ţu!Ć@H±1Ě """"…T[A† ""ĹĹ0*E @ Č˝^üďšřSVůŞŞŞ077ÇđáĂqýúő ź[É?Bˇ666řôÓOqďŢ=ą^#ëXKşző*FŹŤ¦M›BOO¨_ż>>ýôSÜąs§RűyŰő!"bÁ@aŃ[”úSŢşŠl[Ö~^ż~Ťť;wâĘ•+čر#.\¸PĄcNLLÄúőëńĎ?˙ŔĹĹWŻ^•ë<Ë:V8vě:w/bÝşuHLLDll,ľůćś˙üsĽzőŞF÷Ą«« ???ŔáÇYůDT§Ô• CŚch˝{ 3ädii‰ďľű±±±={v•ËűňË/áęꊨ¨¨r·{óć +źęŚşd1Đ "z·ŘÍ„¨&Mš„Ý»wc×®]:t(>ţřăJ—•ššŠK—.aÆ řöŰoK­ß»w/€ę›……¨¶2 MçţČÉÎB\ÔCą^ݧo˝zĆďôĺ=6hܲ^$Ćŕer»śŐ0†DD °uëV´jŐ S§NE—.]`l\ąÖľľľ¸|ů2Ö®]‹üü|Ěś9VVVHLLÄÁ±hŃ"ššbÝşu¬x"RzQQQRA\>ë_ˇ2„ęš7űďě©đ1‹íŰ·cĆŚĽŞ»™U–/_Ž„„xzzJ­ĺţ)ÎČČ˙ţű/V®\‰sçΡU«VĐÔÔDłfͰ}űvĚž=wîÜAłfÍXéD¤ô´µµˇ©©YĄ2D9Yďôłł3«ĺĽ‰¨ú±eUĘŰf ‘gV‘ŠĚ… 3H)%''ăĹ‹’Büo@±Ă‰ŠĘĘĘ’:yέqăĆ[[[hkkĂĆĆFňw]<f‘ÂČČČ@tt4222[ĄrMŐŐ%?««ŞÂP(”ZoVĆtŇę**0ŇШ–sKÉÎFN~~©ĺ˘ü|ĽĚÉ‘Z–ő˙ÓS‹ đJ$*·lqŕQVđamm ŘÚÚJZw(SĐÁ0Â-imŤ¬bńoŁ­Ş UU©pÂP]B•Âą/,´´ę|Ë Elĺ ˛óň$ÁGzn.ŇssüřńJ$‚¨  Ôëbcc‹›7oJ-wiiÚ´©$ŕPÄi¦ëd‘sss››#>>žď XgĽFDDDDDď·¨¨(DDD ::HII‘ëu¦ęę’°BGM :jj0TW‡†Şę{YŹŞŞr…4âV âŔăĄH„ôÜÜR-<Ä]ZJ¶čhܸ1lmmakk‹&MšHĆě¨-5fÄĆĆÂĆĆFć:###tďŢË–-CëÖ­«ĽŻ””ś;wÜąsŕääÄw9•Ug%붦÷Gdgg--­R2‘r{đŕ"""đŕÁ·Žů PO(„ˇş:t…BÔ ®U…˛)ŻHšH„´˘`ăeNŇD"$•čćR2ŕ022’Mš4yç­7j,̸|ů2`Ŕ€8vědąH$Âőë×1iŇ$¸¸¸ŕرcčŰ·oĄ÷“——‡îÝ»ŁWŻ^’îž={˘@F3*›¬:“U·5ą?˘7n ::óćÍ{ë¶ Ô˛’÷ÔŐ«W±nÝ:„‡‡#..999°°°@×®]ńő×_ŁeË–2ËÎÍÍĹŽ;°wď^ÜĽyŻ^˝B˝zőŕěěŚŃŁGăÓO?…j‰ä_|<§Nť’ůž&^/ď}źźźŹŤ7b۶m¸˙>TUUáŕŕ€ńăÇcĆŚPSc/A"""Rlâ–Ąş2” .Ě45a(ÂLSşjjĐ-1~Ő,]ˇş2Ł”ělĽĚÉAzn.˛˛¤Ž””¤¤¤H®­‘‘š4i‚¦M›˘uëÖ5>öFŤ‡:tľQ…B´oß›7o†««+ĽĽĽŞflܸwîÜ——ďŔjĆşĄwíčŃŁ€ľuŰ⡀¬`ăرcřä“O`ee???ôčŃééé ÄĚ™3ń÷ßăěŮłhßľ˝Ôë1hĐ Üľ}‹/ĆÖ­[aee…¸¸8ěÜąS§NŦM›pôčQ™M뼽˝Ń§O™ÇT‹/Ćš5k0nÜ8ś:u ŞŞŞXľ|9ćĚ™»wďbË–-ĽaH!ŚܸqŁĚn#ő„BihŔP]ćšš .‘†F©)ŮŮH, 9˛˛‘—' 7BCC  p€ŃÎť;ĂÉÉ©Fş¤¨ÔtQňAALÜ˝ŕîÝ»ĄÖť>ź|ň ôőőaii‰5kÖâââ0lŘ0ŔĆĆß~űm©2ž>}ŠiÓ¦ˇAPWW‡ˇˇ!ÜÝÝqéŇ%™ű“÷ś‹×Yyu+ďţ  «« +++¤§§ăóĎ?‡‘‘śťťúQí‡őęŐC×®]«\–——rss±uëV¸»»CKK &&&5jÖŻ_ŹĚĚL,]şTę5ůůů>|8®^˝ŠĂ‡ĂŰŰöööPWW‡˝˝=–.]Š"44#FŚ(ŐĘB(âÖ­[8xđ`•ŹçÎť€ďż˙fff066ĆęŐ«»wďćÍBDDD #99űöí——|}}ńĎ?˙HÚŞŞh˘«‹®&&jcw++¸Ł‘ž % 8őőŃÉÄmlđ‘•:ÁZK Âb_čĹĆĆâŔX´h|||„ôôtĹ3ňňň@Pf‘XXFFRËwďŢŤ ==WŻ^ERRşté‚/żü‹-’lwíÚ5ÉT<ĆĆĆ(((@LL ž?ŽçĎźĂČČ 6DGGăÍ›7°¶¶ĆňĺËńÓO?áúőëPUUĹâĹ‹qáÂIąÄŔagg‡ű÷ďăćÍ›‹‹§§'ÂÂÂ`bb‡ ׉řlll°|ůrüř㏸˙>ŚŤŤáííŤĂ‡Ă××ß˙=îÝ»|ýő×8{ö¬¤Ś+W® uëÖ Ĺźţ‰WŻ^áĚ™3ŽŽ†››.^ĽXjňśsÉ:+«n+˛˙¨¨(¤§§ĂÁÁăĆŤCź>}‡óçĎ+ě5˘Ę‹ŤŤ…@ ŔĉK­›5k–T8VÜçź@€ŘŘXÄÇÇ#,, }űö­–.âţ|]şt)µnČ!Ř´i–,Y"µÜßßçÎť‡‡z÷î-łÜbčС8sćŚT:PQQÁ¤I“°dÉä%Ô•őňĺKI™bâß1[[[ŢtDDDTë.]ş„µk×bѢEĄ k--t02ÂGVVlcccŘę輷tÖeşB!é须™†Ű١ź…ščęB»ŘµóćÍĂďż˙Žű÷ď+fq÷î]¤§§ŁqăĆ044”ąÍŢ˝{Ž©Qňa˘^˝zظq# §§‡eË–víÚ%µíŐ«Wm۶•, /µL܇'66kÖ¬ŤŤ 6lAř˙V$™™™9s&ĚḚ̌}űvÔŻ_ćććŘĽy3ţüóO M›6•Ş“âÇŕëë‹úőëĂĘĘ ü1ŔÓÓ+W®”,›¸ĹCff&†Ž‚‚śšššĐŐŐUŘkD•gmm ggg–ZCCĂ2×9;;ĂÚÚÇŽCAA\]LäaooR­Ä´´´0uęT¸şşJ-·x=ztąeŹ3Fj{±śś,Y˛Ďž=Ăďż˙^Ąăo×®ťä÷S$aÆ 6l´µµ±aĂŢtDDDT+ŇÓÓqôčQ|ńĹرc‡Ô€âc¨Ť ş›™±ĺĹ{ĘHC.ĆĆlc~h©ŻŹzĹîĐĐP¬[·^^^2{ÔjQV“‚‚DEEá»ďľĂŠ+ŕää$i6-vđŕA¤¤¤ U«V’efff€ÔÔT©mĂÂÂ...Ą–É 8>űě3©pEܧ]<źżż?0bÄhkkK¶300@ăĆŤK•[âc;v,ŚŤŤĄŢ €ÂoŠ‹·RÉĚĚ,Ľ@EßĘîÚµ QQQ7n,--ĄĘźż¬€âmç\Vť•¬ŰĘîżm۶ř裏d^7E»FT5DLLŚTĘ…`Ú´i¸˙>˘ŁŁ%ëçh«Ş˘Ąľ>>˛˛’l}AĹŤV††p·˛B? 
4ĐŃ‘tEIIIÁŽ;*jÔHqĺĘŔü@ ůŁ˘˘‚úőëăĚ™3X»v-._ľ, *Ę’źźŹ§OꀤKBya†řˇXVŔŃ˝{w©×?yňĐ´iS…ł@·nÝJ‡şşz•”ĹÇĐŁG©ĺâ$łärńâc;tč€Ň-Y@Łh@–ěěě źóŰęLĽ¬˘ű—9~üř2E»FTő0€T ŚŔŔ@…BĚ™3jjjĄÖ‰ď©¬¬,ˇS§NRa_UŚ7GŽĄĄ%¦L™SSS¸¸¸ŔŰŰ7nÜůq÷·· P$~ßJHHąţ믿ƛ7o°qăĆJ{TT”dĚŚ[·naĐ Axúô)öîÝ dggKZ?Ő´   R!†©ş:şŤ™ĐĘĐ-0H®`Ł“‰ >˛¶F##I7q¨áăăSˇî'5Ú2ăęŐ«ČĎĎG^^rssńĹ_(ě'?cĆ ÉĂgqÇŽĂŔaeeˇPˇPGGGj­QüXü-iYĘâe%gVą~ý:HĄĽvíČś®ńůóçĄĘ­ń1těŘQć1”ulâ.âckÝşu©˛Ĺă[é¬Č9ż-\×me÷/ëdE˝FT5íÚµąąy©Ŕ˘cÇŽ033CűöíĄÖH–###ŁÚş 4×®]ĂDZqăF4lŘëׯ‡łł3†^޵—¸%ÔۦOÍĎĎ€RÓłŠcţüůXłf ŢĽySˇcމ‰««+.\¸€+V@UUŹ?–'ĂßߣFŤâMGDDD5*99k׮ŤBŚžffčmi Űžz“ę& UU4ŇÓĂ`©P#66ëÖ­Ăľ}űj'ĚHKKĂÝ»wˇ©©‰Ö­[KZd¨ŞŞbŢĽyPSSĂŞU«dľvóćÍ4hâăăqđŕAĽ~ýyyyřꫯJ=H?}úÉÉɰ°°‹ŹŹ/·śśś ,ŃÔČÎÎ#FŚŔµk×pâĉRŻ]»v-€Â) ;wî,!$$¤T!kÉňĆ~(Ůő@üúâEŠg(Ůb䯿ţ’YFEꤼcgąé®bÇŹP8îFEĎąĽń+*»YűQôkDŐcŕŔHMMEhh(®_żŽäädôęŐKf$''ăúőë AjjޤŰұcÇĐ A´hѢJaŠ}$aĹ’%KĐ˝{wL›6  Äůóçńé§źň†#""˘qńâE¬[·NŇى®.ú3Ä ÔĘĐý--aZôś oooÉ—ęď$Ěw1)Ů]@lÁ‚€•+W–Z'î&Đ Aɲ7nH¦ĺüŕ$ËĹýĹe˛ş/Čó ,žÎóŃŁG’e’ĄŞV%Ě»]|ŞV pJÓ}űöˇeË–TŤo@şşčŢ˝;ţůç {÷î’ß_ˇPnÝş!((gĎžE·nÝ §§'w###$''ăŔRËĹ-·zöě)Y–ššŠK—.•9…©¸ËF×®]Ą–÷ďß˝{÷ĆńăÇ%­}J:}ú4ţúë/|ňÉ'pss+÷544°lŮ2lÚ´Iî:Ź#>/044ÄńăÇńřńc<ţĽT÷"""˘ęžž.é"+Đłhv˘wÉĹŘXhÄĆĆb˙ţýď6Ě(9-kqââ%[glŢĽýű÷DzeËФI„‡‡cĘ”)X˝z5š5k†ž={ââĹ‹ żmŢĽ9vîÜ)™ ¤ĽoýK>XËZîéé ___ś? 6Ä‚ ŕĺĺ…gĎžÉ<§[·n•Đł2a†<ÇfooŹ‹/˘qăĆřđĂa``€#F k×®¸yó&š5kV©reŐ™¬ş­Žýżëk$ďőˇę5pŕ@„‡‡#44´Ô°˝zőÂĹ‹qőęUI“ŁGŹBWW÷­ÁŔĘ•+aff†U«Vaßľ}HKKCHHfĎž CCC©÷___8;;cíÚµX¸p!ž={‘H„ŘŘXüôÓOX°`LMM±nÝ:©}ěÝ»®®®5j–-[†'Ož ''Oź>…ŻŻ/Ś?ü;vě«>ĆŹ;;;äääȵýŞU« ¦¦†É“'#((iii¸˙>ćĚ™‘HKKKLź>ńńń\”¨<ÁÁÁ’®%m Ů­„j5а.ş˙BCCe¶¸ô$Qä IDATĽmŘţ÷ÜłgĎ`ooŹ6mÚH®Ĺüüü››‹Yłf±˘đńúÔŽÇŹŁQŁF Ą’ÝĂÄÝ—""" ŁŁ <˙ý÷[ËŽŤŤĹęŐ«qęÔ)ÄÄÄ ^˝zčŐ«|||$űKMMĹĎ?˙ŚĂ‡ăţýűHKKŽŽ5j„ţýűcÎś9eN ťźźŹ;v`Ďž=¸qă^ż~ CCC´iÓăĆŤĂČ‘#KŤ!ţ·¬·Ô}űöaäČ‘e®/éňĺËđőőEHH^˝zSSS¸ąąaÁ‚’0# ęęęxőęoşwhęÔ©§NýĐÖŐýťě3öYNř@áŔ˛âľŢ'GŽ‘ŚŃ5iÁúw¶ßđ‹'p#¤pZřÍ›7óBPťçĺĺ…””Ş«Łw9ť˝ Ůyyř+&ĐŁGÉçi15VQˇ & gĎžEăĆŤ%Ë·lŮĄŰËĚĚÄ/żü‚€€Vž^#^źÚÓ°aĂ2Ř[µjUjťxšSyX[[—Ůu¤$ńśŢŢŢ>L0&Lű5ĺ…đđđ»¬:ŕČ‘#e®?tčo4"""ŞVéééHIINąJ ACU¦ęęHĘÉAtttéĎ쬢B‹‹ĂŚ3Ť””lٲ?üđ:wî OOO©í===Ń«W/©©DIq®Ż‘üŠ?,Ö“sŕr˘šf®© ŚŚ,µŽ-3ŠřúúÂÄÄ;wîDóćÍ‘››‹† ÂËË _~ů%444¤¶˙ý÷ßYi |Ťx}*ç•HÄń2H!$Ťá" ĂŚ"***?>ćĎźĎĘŕ5"""""zo=NM…Łľ>+‚jUšH„¤rŃg7""""""’x•›‹űś5ŤjYč‹ĺ®gADDDDDDR®˝|‰G©©¬Ş!IIHĚÎ.wv3!"""""˘R.ÍnŇHOŹ•AďDv^®Ą¤ŕIFĆ[·U–qqq066VČ MLL„@ €……ﮨÖ/‘â°ŠŽJn€Â@ă\b"˛óňX1TŁR˛ł/ 2432`’Pćö f„‡‡Ú¶m«•zç΀“““rŢ))8tčPµ˝ľŞőQÝĺý{÷—eő˙qüuł÷Ţ (‚ŠÍ™ĺJMëgšmWe¦f¦e*V–_WnMMËUŽ4m¸3sĄ™g‚‘˝do~ÜÜ·Ür ŕçůx|ß<\ç\ç:׍r˝9ç\B!ŁŚtĽ®^EżhšDf&Ű## IK“Á\v~>gŮCr^IÉx_E·ŚMÂŚrčŇĄ ………ěÝ»·Ć}0ňóóéÜą3‡~`ő«2ş=ńä4h …‚%K–”z̲eËP( 4HŁ\ˇP P(řâ‹/î{žĹ‹«Źż·ľB!Dme’™IĂ˙®`‘” @na!˙ŢľÍďjb\LNf{d$ÁĹögq ŹŔ3$˝‚‚2ëW«0Ăßß_îč¶lŮ2.]şDëÖ­Ký‡Ýžx˛?Ű 4ŕ“O>áÂ… %ľÄÇŚ——Ë–-ÓÚĆĘ•+ÉÍÍ-ő………,]şT[!„O$˝‚>žŽ;2aÂ4ŽŤŠŠ"::[[[ęÖ­[áúaaa¤§§ăĺĺĹ AčŃŁQQQlÚ´‰””ÜÜÜ:u* .$(([[[&OžĚďż˙ÎôéÓ™7oW®\ÁŇŇ’‰'rčĐ!uŰŃŃŃDGGccc§§§ň&„‡“’’‚««+S§NeѢE˘««Ë”)S8zô¨şţÖ­[éÓ§îîîqţüy˘˘˘9r$§OźĆÎÎ//Ż ŤďäÉ“yçťwđőő%88[·nŃ A&Nś¨ž6öěY"##°µµĄ°°rŹmiőµŤGUúŁ­˝ăÇŹăďďONNGŽ!22’6mÚ0iŇ$>űěł Ť¨Ý|}}Őßż~řˇşüłĎ>ăěٳ̜9łÔ_řůůńÍ7ß”Úţ7ß|CóćÍ100ÁB!ÄĎ61±D¨‘[XČÍŚ öĆİ;2’3‰‰lŤăx| žb~áÂĚË/żŚ‘‘‘zĆCdd$Ó§OÇĂĂ^|ńEFŽÉ´iÓÔĺ}űöŕźţ)łçĎźW·;sćLÜÜÜđôôT×?qâ™™™Ś=Ö®]‹‡‡ŽŽŽ¬X±‚M›6QXXźź_…ÇW5í}úôéŘÚÚbooĎĽy󰲲R÷ŕÔ©SU[mőµŤGUúso{ąąąĽőÖ[ŘÚÚ˛aĂ4h€ŤŤ łfÍÂÜÜśăÇŹWřĽ˘v5jýúőăűďżgëÖ­9r„9sćĐłgOĆŤWj˝ÜÜ\ Ŕ±cÇ8wî\‰ŻßĽy“Ý»w3pŕ@233e …B!î 5<±N¸­.OÎË#85•˝11üÁ™ÄDbĺç¨'Jv~~‰ăfF†z†N^>v±±ř\ĽTéCEďa\ŔÖ­[K”988ZlcŹâłĹ{Z‘ú÷>żđ %ĘUÇ*ééĘ{饗°±±Q—«Xttîf<§Oź.ő|đŕÁX[[«ËUަ¤˙úëŻÄĆĆňÁ`bb˘>ÎŇŇoooÎť;W©MOÍĚĚHIIářńăôěŮ€úőë“””¤qśŞď•[mőµŤGUúso{7näĆŤ`dd¤>ÎĹĹ…”””JŤ¨ýV­ZĹŮłgy÷Ýw177ÇŢŢžuëÖ•ąIg^^oĽń,]ş”ďľűNăëŞ}6Ţzë-¦L™",„Bq‹´t,ŇŇq 'ŃÎŽD[˛Šž{Ňóó NM%85}…G## q31ÁL__݉ÍĚ$.;›đôtőŰHJ|V’’±LNĆ61ńť÷‘lZPP@hh(€z)¶ ˘2ő‹·1dČ­ĎĎ>ű¬Fůµk×´–«fr4lذ̰EŐnçÎť5ę߼ySŁľęíO?ýt‰>«¦­W&Ě?~<}űöĺ­·ŢâČ‘#ZŹÓTdlµŐ×6UéĎ˝í©^ŮÚ©S§6˘öł¶¶fÍš5$''ÎüůóŐ!]YÜÝÝyöŮgٸqŁF–™™ÉęŐ«éŇĄ îîî2ŔB!„eĐ+(Ŕ!.ŽFW‚đąx 
—đŚŠďgVXHDf&g““ŮĹĎaa‰‹ăbr˛ĚܨaT3/.&'ł?&†Ť·nńW\ďÜŃ2tňň±N¸MÝë7hxĎdŔCš™±sçNľýö[Ξ=K||<ęß6oŢ\kQü·"ő‹·ŃµkW­ĺm۶Ő( ŕ©§žŇZ^|釶ţ©ĘJ«ß˛eK@ąO@Ó¦MKô9::úľACiĆŤ‡‡‡_}őëׯgýúő<óĚ3lŢĽYăNsHEĆV[}măQ•ţÜŰžęFŤ=°qOŐ¬#Ö¬YĂkŻ˝V®×§2„°zőju@¶qăF:t¨ l-”žžNDDDŤě{DDDĄ^ lll,ÁśBGÂ0'‡¸8ââČÓŃ!ÍÜś;VV¤™›‘kh¨nD 2¬ôôp46ĆÚŔS]]ŤŤe0«Ap‘ś“C\v6iąąÄeg“^ĆŰkŚ220KMĹ"9ąJËG[±bĹ FŚA«V­Řşu+ľľľŔŚ3hѢ…úXŐćźöööę˛*RżxÎÎθ¸¸h„ŃŃŃ888¨ßšQüxGGG\]]K”ŰÚÚâááˇŃ†ťť]‰˛{ëGFF‹‹‹ NNNęuęÔŃčs\\áááíVT˙ţýéßż?GŹeěر:taƱsçNőąprrR_EĆV[}măQ•ţhk/&&@=†UńdرcË–-ăí·ßĆÎÎŽŮłgłxńbŤMAËú Ť5ŠĺË—3nÜ8 K—.ĹŇŇ’~ýúÉŕÖBË—/WĎĐ«i~ţůçJ× @C!Ä#ĄWP€Őť;XÝąŁ|860ŕŽ•™ĆĆá(÷ŰHľgŮ»©®.ÖX`ĄŻŹ©ž6ĹęZ¤ç瓞—GlVI99ę}.î^Ą¦a–šŠ^AÁŁý|=čçÎť Ŕ?ü@ăĆŤŐ媍‹?0k[bR‘úĹ۸w#Mm{;”vÎŇʵµqżv‹÷#ż(µş÷MŞWAVf‰É˝:věČöíŰquue˙ţýllË;U鏶ö,,,H¬ÄôŁŇÎ+jżččh† F˝zőX°`ěÜą“‰'ŇŁG|||ʬobbÂË/żĚęŐ«Ů»w/–––ňŢ{ďa,ż¨•Ä}µup}dýuőh€ľą9Y•nĂČČHcď&!„âqPÍÚPQÍÜČ41&ŐĚŚ ŤăÓóóIżg€ľBˇžÁa¦ŻŹ•ľ>::X`X´ˇĐ”–›Kz^ž:°HËÍ%=?_ăí"eŃĎÎĆ8#ăĚ LSSÉĚ‹Gf¨–OÔ«WO]vîÜ9őëJ›5kVćnEęW4ś¨hyYKLĘfxyyqéŇ%nܸˇ^6‘‘‘ˇ*f|úé§l۶Ť;v¨Ň Š0GGGőqŞ7Ľ(*2¶ÚękŹŞôG[{Íš5ăđáĂܸqC¸;vŚđâ‹/ňí·ßV輢v+,,dĐ A$&&ňË/ż`nnŔşuëh۶-o˝őÇŹG˙>›L 2„Ő«Włyófő®,1©˝† ÂĽyóÔŻ‹¶¶sá©g_ŞPadlúHűÉő˙NŞŚńăÇcgg'7_!DµR|ć†sQY¶Ůú¤››“ilBގzSQ•ÜÂÂ2ÂMuu1ŐÓĂ@Gë˘_,« V…ŞĐM›•U˘¬ĽLRR0ĚÉĹ83ŁŚtL22ů¬‹Çftîܙݻwł|ůrŢ˙}ţţűoBCCŃÓÓ#77WkPüa¶"őK tqż˛Ňú1bÄFŹM@@+V¬ >>žyóćaggGHHĆuwďŢť+W®Üwwaa!7nÜ࣏>bíÚµčëëóé§źđŃG•8>66–´´4ĚĚĚ*<¶÷Ö×víU鏶ö&NśČáÇ™6mË–-#44”aÆ‘¨ń¦šňž·Ľă*j¦ąsç˛˙~&L ±il«V­}:ëׯ'<< Ľ˝˝>|8 RwóćMúôéõk×hѢ§NťŞĐŘj«ŻíÚ«ŇźŇĆr×®]Lś8‘«WŻbaaA›6mřěłĎ46r-ďyË;®˘ć9{ö,íÚµŁQŁFś:uŞÄr®ÜÜ\Ú´iĂĹ‹9zô¨ĆçGµbńżţ¦M›ĆçźŔ×_Í'ź|RęńÚꋚ'==]c††Wă6Ő>Đ(OqôŹŤdZgűöíěÚµ €·?^üČÎ{ćŘnÎWľťnĹŠr#D­Ä‚ đ ®ËĘ+ĂŘ<]˛LLÉ×Ő!ÇŔle qďŇ•'…IJŠň˙33ŃÍĎÇ8#ťüĽXD;;[´7ć˝?đ0C”íÖ­[Ô­[???őĚPnH———Ç| ôɸÖއP???BCC9uę”Ö·\ĽxÜÝÝ9w¦Ą†aaaÔ«W…BADD„Ć&´Ą…Ą‘żV%Đ C 3„0Łz€:ô4‚€=˝ËZŞK0ˇbž–¦ţoěôs”ł9Şër‡fčÉ·íĂ1tčPöíŰǡC‡đööV—Ż\ą€7߼űĂrff&ß|ó űöí“{€d\k/SSS‚ď{\łfÍČÖ2UO[Řŕîî®Ţ´÷~ÇKXQ»>KĹ—ś¨–gÔ´@C‚ !„â>@±ĺ&• jRĚĘż_V®!9†ĘYæ÷ĽˇĄ,5mÖÄă&aĆCâääDTTŁFŤbŐŞUšš˛uëVćĎźOűöí9r¤úŘ‘#GŇ­[7ŤW˝ŠŞ“qB”GM4$ČB!ľŠ é2`Ź€„ÉôéÓ±łłSż5//OOO&MšÄ„ 0,¶QÍš5kdŔW!DyŐÔ@C‚ !„B<©$ĚxHttt?~<ăÇŹ—ÁB ¦d!„â‰~ć–!B!”T†jyÚő˙Nrx÷új×O 2„Bń¤“0C!„(¦şd!„BH!„B”P] 2„B!”$ĚB!´¨n†B!„wIQAqqq( śśśŞE˘˘˘P(ŘÚÚĘ8 !ÄV] 2„B!4IQA—.]Ŕ××WŁ<11‘ß~űí‘÷çĚ™3´jŐŞFŚ“BÔ4Ź;Đ C!„˘$ 3*¨K—.˛wď^uY~~>ť;wćđáĂŹĽ?Ő5ĚĐ6NBQS=®@C‚ !„Bí$Ěx–-[ĆĄK—hÝşő#?·*Ěđ÷÷—!„ŃŁ4$ČB!„(ÝC 3®\ąÂkŻ˝†““ĆĆĆ4jÔYłf‘——§qÜž={xîąç°¶¶ĆČČ&Mš0gÎrss5ŽËĚĚDˇP`mmMdd$/żü2–––XZZ2f̲˛˛JôˇĽmW¤ż...( BBBđóóCˇP0fĚŢxă Q(4hĐ Ä9’’’°°°ŔĘĘŠäää_ŻĚ5j›™Q‘ë¶´´ÄĘĘŠú÷Ą%ÎÎÎLź>˝JǧĘÔß˝{7m۶ĹŘŘ'''ĆŤGnn.mÚ´AˇPpýúuůîBÔÚ@C‚ !„BÇf?~rrr8rä‘‘‘´iÓ†I“&ńŮgź©ŹŰ°aĎ?˙<éééś:uŠřřx:věČ„ ĐhóćÍ›ęęf͚ŗ_~IXX/żü2K–,)ń@\‘¶ËŰßččh˘ŁŁ±±±ÁÓÓ“łgĎ €­­-………„……ˇ««ËÍ›7K!K—.%55•±cÇbeeUbÜ*zŤQQQDGGckkKÝşu+|Ýááᤤ¤ŕęęĘÔ©SY´hčęę2eĘŽ=Z©cď§ŠÖßşu+}úôÁÝÝť   Îź?OTT#GŽäôéÓŘŮŮáĺĺ%ß˝BZhH!„BńÂŚÜÜ\Ţzë-lmmٰa 4ŔĆƆYłfannÎńăÇŐÇţúëŻXYY±lŮ2ĽĽĽ077ç‹/ľŕÇÔh÷ęŐ«čëë3{öl7nŚĄĄĄú}Ó¦MÇ—·íŠôWŰ,S§Ni”âééI^^ˇˇˇęă233YĽx1–––Ś;VëŘUôµő§"czţüy"##™9s&nnnxzzŇ·o_Nś8Q©cµő«Ľő333=z4¬]»Y±b›6m˘°°???ůÎBÔĘ@C‚ !„BÇflܸ‘7n0hĐ ŚŚŚÔĺ...¤¤¤pčĐ!uŮÖ­[ILL¤yóćę2RSS5Ú˝páď˝÷&&&ęr777ÂÂÂ4Ž/oŰéďéÓ§K<¤«ĘŠďYѨQ#®]»¦.[µjńńń|řá‡ZgeTćµí—Q‘1UŐ}pqqA__}}}u˝â3 Š? 
7iŇDŁ<<<¸;{ˇ˘mW¤że=¤“É˝aĆO?ýDhhh™ł2*sŤÚúS™1}ę©§4Ęhٲe•Ž-ŢŻňÖ?{ö,M›6-1>ŃŃŃ%ÚBšhH!„BQ ÂŚśśśĘnĹŠôęŐ‹/ľř‚ pć̆Ό3đńńˇK—.;vLăˇwÁ‚řűűăďďO˝zőřďż˙Řşu+oľů¦F*Ňvyű«-<2dŤ7ć‡~ŕŮgź-f€r˸qăî;n˝FmAeĆ´xpż€˘"Çj›™Qžú#GŽdúôéüý÷ßxzzňńÇ3iŇ$nÝş@›6m4Ú¸páB‰ŤM«ú5!„xX†B!„†˘°°°°:w0&&gggŤŤ­1۵kW8Ŕ¬YłÔpÖ¶k|TnÝşEÝşuńóóS BQ¤§§3oŢ<"##5Ę%Ȣb¶oßή]»hŮľ×#;otŘ5b"®Ę_ Q›±`Á<±HK—AŹ]´ł±..Z˙Ö«îť/móČę쯿ţâŔ4oŢĽ\ł2jâ5> C‡eßľ}:tooouůĘ•+JĚNBęN5CŁx !A†§ÚÔ đź=2 B!ü2“­¬˝*Ş›ÜÜ\Nź>Í!C°¶¶f۶mčëëתk|śśśŠŠbÔ¨Q„‡‡“ČĘ•+™?>íŰ·gäČ‘ň+„¨qT†———BT’ŻŻ/FFFŹíü÷.'BńřUű™_s…» IDATĄí·PůřřAçÎťY¶lőëׯu×ř0Mź>;;;~řá7nL^^žžžLš4‰ &`hh(ß±BÉÔÔ”O>ůDBJ˛łłcѢE2B!ÔŞýžB!„B!.Ů3CTGe홡#Ă#„B!„BšD !„B!„BÔ(f!„B!„˘F‘0C!„B!„5Š„B!„B!„¨Q$ĚB!„B!DŤ˘'C „Bńŕ„……qőęU®^˝JFF† x˘řúúŇ­[7ěرc?~śâîî."f!„B<AAAěÜą“k×®É`'Öµk×$Ěx¶lŮBVVóćÍcüřńh„B!ĘFff¦ „¨6lřŘνcÇvîÜ©Qfîŕ!7E<1˛Ó’Éɸ#ńdee©˙_ !”$ĚBˇŐöíŰٵk— „¨1\]]ůüóĎůyúé'<€®ľ!N>ípöi‹ľ‘‰ÜńÄ <@ä…C2Ź€B(É B!´ –A5Jddä#?g`` :Č0±v¤yß‘¸·ě"A†⡰¶wîaaa2(â‰%33„B”I?% ë Ó2˘ÚJw­Oş«çc9÷–-[匌Ć=†J!„x¨<ĽšÓÄďŽţ±Qfh'ž„B!ʤČÍĆ89^BT[Y6ŽŹĺĽÇŽ#11Qů€Ńş—BG˘ał¶h'ž,3B!„¨„sçÎ``b‰Ł·ź â‘iج-ź{%'âÉ%a†B!D%\˝zk÷F2BGN ń¤“e&B!„• zU˘ž± F‘ä„XömYÉ•3GąAFęL-¬đhŘ‚./ Á·C÷uŇR’¸zî_üžî%(DÉ’ń$“™B!„˘Ę"n\áłAĎręŔvúůŻ>É’=A ˙|)1a×Y4á-ţÜňťFť‚ü|fŤęGpŕq@!*Ifh'•„B!„˘Ę6, íN"Ă&-ŔŻSOŚLL165ŁI›Î ˙b)ÖöÎDÝş¦QçŔŻk‰ ˘žŹŻ  U †xÉ2!„BQe7.ťŔłqÉÍP˝šú3˙·@őź§íέ«Ő^ńĺH¶,›ĆüßIg×ú%\:q¤ř ŤM¨ßÄźľCĆâݬµşNaa!ďwŻŹ±©9ł~:ÎĆEźqćĐ.lťÜřrí~.˙‹?·|GČ•@r˛łppő cŻWčţĘpôôô5úwář~~_3źđk˙alfNŰî/ńňČ)ĚŃ—›WÎ1kóqÝę‘–ÂîőßpúĐNnÇFbhd‚wóÖô4–úMd#Xńx %'B !„B!ĘÍŘĚśÜÄ,BĎÓĐ·]™ÇN]ó'Iń1Śű?_Ě,mX˛ű?Bţ dîŘŘ9»3bę·¸yůÂĘŻF3{ôK|şdŢÍŰp;6‚ěĚ <4ç»iđT·yóŁéäççsüŹm|7m4^ÍZóů÷{±°¶góŇ/ٲl©É·8ęsu_NÜÁňφă˙lŢ˙júFl\8…çN$4č<ćV¶ę #.ňs>@NV&?ť‹Ź_b®łâËQĚů"/ÜLŁ–íĺĂP Ě›7Oýşc!†÷#ËL„B!D•uů}:_VÎ""$¨Ěăo)_m[·asr˛3Y6ĺ] 7o#ő›¶ÂĐČw寮6ć+ňórůmő\uýëWżńz ¤u—Đ74ÂČÄ”łGö`bfÉ[ăgáčVcS3^:€ţŘŞn#';“őó'cnmÇŰ‹°sŞĄŤ='ĚáÄţ_),,ÄŁA3ňóňX6ĺ˘Ă5ý{ü:őÄŘÔśz>-yuôTe˙ľź#„J:wîś0Đ%'âI 33„B!D•=˙Övţ°ë˛cÝBś=ĽéŘűş˝ü6†šo} :Ż 3µP† {·r;6‚®ý‡beç¨ql˝˘cB.ź˝[?ř‚: iŮé9ŤăGM˙ľD˙,¬í”wi겳‡÷’O·ochd˘.71łŔŃÍ“°k—đ( [NŘέ«iä×-Új´]żi+n^9'„JĘČČP˙·o»ž2 Ąhâ׹\ÇÉ !a†B!„ĺ P(č3čCşĽ4”3‡wsúŕ.ź<ĚĎË˙ÇÁßÖ1yů¬íťJ 3ÎŮ @óvÝJţŔj`@^nN±0CYżcďWîŰ·‚‚b°wńP—_˘Z‘@CÔff!„B‡ÂÚމw1ö…ćW—'D‡“š|K[l\H»ŁÜ/Aµ¤¸°k—€»Ë9’âcHNĹĘÎQc¶Ŕˇß~`Ýś ÔmÔ‚‘˙űwď&™°mĹLvţ°:^wĂ Ől WŤ6R’HŚŤÄÜĘ;§:ýűv†Ć&rsEŤ!†¨­dP!„BQiß~1‚ű6#>Jű†Ć¦ŔÝĄ"pw‰jóOcS @s)‰Ę…öĐŞłrůjF…jsÎâölZŔ;Săݬµz/Śë—NPÇűîĚŹ‚|ĺ˛]}ÍWµž>´SŮ~±ţééő_!÷\Ô<˛)¨¨Ť$ĚB!„•¦§o@Jb<˙ţů‹ÖŻźý{M[ßݸ0ĽčM$Ĺ—”4öď@Pŕ?őoÇFrňŻí¸z6ÂŻS/ŕîćź [”8ߝ۱ػÜý­sصK\»p7Ouą[]â#CŐeŮYěݨ DЇ-ő›´*jë˛Ćů.˙‹IŻu`űÚňaŐš˘¶‘0C!DŤ4ůfŞÖ˙Mşq‡Źońú†ť4đ(üśU©S™6d„xĐ^:s+[¶Ż]ŔŢMËąAnN6·c#Ůżu5ëçMÂÖŃŤŁ>/Q7%1ž¬ŚtŽü#S~Z2•Đ óädgróĘ9M„‘‰)#§­DGW·(Ě8_f”ś™ŃĐ·]GNv&—N"äż@tuK®®~ö˙°mĹ,R“oz•Ť ¦`neŁ 3Ý Kú˝3=}6, üúdgfpöャüj4·c"qײׇhńđČžB!j…ŽĆV6Ômß™şí;ÓđąŘ:âu óóep„x\ëňĺÚýěް”ĂŰ7đËĘŮäçalf“{}zż9š®ý‡alj®®Ó±÷+ś>´c{&ěú|±j/vÎuĽ|ŰVÎäë_&'+gš·ëFßÁc±´uP×/k™Éŕ sX;ű~[5‡}›Wňlż!ô4†ś¬Lo_Ď×c0tâ<Ľ›·ˇËKCČLOaßć•Lxů)´xŠ7>šÎô}đlÜRÝný¦­řvŰ×ĚgöýÉJOĹĚĘßÝéůÚű¸Ő÷‘¨1”ÜCC 3„BGhF=sŤ?ë`áě†Ďó/Ńaô'xwëMëÁ#8ąziµěݵµ˝3oŚý_ąŹ·wqgúú#%Ęëx5fě×?Ţ·ţ‚ßĎ—ú5ĆÍŰP˘ĽÇ+ĂéńĘpŤ2Őëdű úP]–NJb< ›kĽ=”35ĆĚ^'7\ÔxÚŤ—^zIFÔ(˛ĚD!D­’ź“CŇ­ţY6—ýÓ&Ь˙ë20B «¦ČG/ú˘Q~ř÷ő´{®ż ’¨Őî]r˛uëV e`DŤ!33„BÔZÁě ×ŚĹŘzz—řšj݉ŇfJÜďëu;‚„kA|׫-Ý?›ŤĎó/a`bÂś&Nň!5–Ą­É 1ü8oC'ÍÇĐŘ„ÓwňÇO+đjÖš.ý†Č ‰'"Đĺ Ťś囄$Đf!„Ź™BW9±ŕî—Q—‡w·Ţ řv#Š˘Íě6¦ËÄi¸úúłíý7+öĂdĎyé›uíéŕĐȇFMhůęÖüß3¤ĆD•¨›ź“ŤG»§yuíŻč{őĄSS_^\´šĽś‚÷ţ@óođü¬Ąç1µłÇ»űóxwëÍS?ćĚ+5ÚĎËĘ@ßŘvĂÇâ?ř=ů`‰ZáĄá“0ł´áź˝?3ĺͧÉĎËÇŢŐťç}@ď×Gˇo`($ž¸@CˇPPXXH®ž<& 3„BǦA÷>Ä]ąřŔÚ,,(ŕą©s9·y'W/%éVF–V4}ńşNžNĂž/âőěs\?řGąŰ|ćăĎPčęrüŰůśÝ°ŠÔŘhôŤŚ©Ó¦Ď}9KWw:ü9;?ˇ%Ěȡ÷Ě%ś˙ůŽ-ťKz|,6őĽč=s n­ÚŇzČ‚÷ţŽ•{=zM_Ŕ?Ëćr~ËŹ¤ÄDbfďD“^ć鱓éţŮln=Hbȵ»íçć…¦ř˝ů»'}Ŕĺß·<Đ·Äń8čččĐóµ÷éůÚű2â‰gëੵą9Y( ňud7Qţ—!BQ«ţaÓÓĂŞN]žzw ]'O pÓšÖľ®'Ůđ!·o\Ą /ŹŚŰ ś\˝”żS†Mţď• µiU§Ç–ÎĺNDąąd§¦pýŻ=ü:z0Yw’0ł×ľ¤ĂŔĚśđS˙°wĘG¤FGR—Gµ ţš1›˘%6ţ†Łk`Čáů˙ăĐś/IşB~v6w"nńϲąü˝x:zz´|m¨ć 
Ц›ŘÚqußNÎý´–ÜĚ r‹^§)„˘fK g÷ć%äćdýµ_QŃ’!Ş3™™!„˘FSíAQšs›×qń—Môśg~üNků•ťżĐnÄ8ś›µ¬P{·o\ĹÁ§)˝g.a˙´‰¤ĹŨżuî4ó}ÝˬjͲeńW˙ŔÔĆŹöť¸řËF­m\úm3ťÇ†ÇSK=ĎĄí[ä'„µ¶ CQŤfŢ˝úT‰˛Âü|˛SSIŤŽ&ćü®ýń±/VËńUőąků°=f!„¨] ÉJI&úâ97®&hĎoüqA—´–'†ŢŔ̡bcîüdŻoŘAă>ýńéÝŹ¨ó§ ýç0ˇÇvň…÷ŮóăöŤk%ĘrŇ”!Źj +7e ňÁńŕ2۲ň¨Wę×’nŢĎ—BÔ÷Ý»wçĎ?˙¬öýVčębde…‘•ö>>4{ő"OźćŔÔ©¤ĹÄVąýfŻ ¤ă'źHQH!„˘F+ím#“*(¸WnfúFĆj/ćňyVt÷§Í°Ńř<ß×–mpmنŁ>!->–#ó¦qnóşRëçegÝ÷&fĺꋡiéă™™ś(¸&9!–Ź^lˇ1Ë˙ĽŽN±Í_k“”¤>ěÓ {íx<żˇMŠŹaÜ˙ůbjaÍ7{®Č‡OÔ¨ cĐ AŘÚÚVŰ0Łx° ĐŐĹĐÜ[//ę=Ó™F}űâęďĎKkÖđˡ¤ĹV-а÷ń‘„B!DÍ¤ŁŻ_ö?žFĆZ÷ŚĐ76 §űI¤ÇÇqpöçśý9ÖuëăůtW|z÷Ăý©Žôžő şĄ.o)ŹśŚ4 Í-™×ÂŤě”;r“źˇÁ¨ăŐři)I\=÷/~O÷z,×uďů#C‚p÷núŘĆúVŃX×mŘ\>x˘Ć:t ((¨Fôż0?ź¬äd"Oź&ňôiέ_OŻyó°kŘî3¦óëŰďT-Ěh$a†„B!Dµţi¨ t ÉĎÎÖř’]ýeVµónDôů3%ĘU›m¦DGV©kIˇ78z3?¬¤ĺkCé5c1mŢ]Ą0#)4§f-±őô&ęÜią˙O[ÁçđhŘěµYźĎ¬Qýhâ˙ôc 3´ťß§UGÖ‹y¬c­ Žę6’0CÔĽ Ł&K‹‰eçy}ŰVśZ´Ŕ˝}{ÂţůGýu#++ZŚG‡;;ŁŁ§GFÂm˘Îžáěšµ$ÝĽ @Ë!i;z´şžjż‹?'p}ßľ µĄŤ[›6ř ‚]Æčr'<‚ŕÝ»ą°qyšËI+zÇfÍhţúk85kމ­ yŮ٤ĆÄpóŕA.nŢBVrr‰ţ86mJ‹7ßÄŮ×#+K˛SR˝x‰ó7ućLŤ¸÷ň6!„O¤ě˘Ą"NŤ[”řZŰ÷>*ł®ßëĂ´–7é;€¨ó ^\¸Š1'®áâë_âk—wlŔÜÉĄJ×{óŘAĺµ˝;Fë×=;wcÄ@:Ź˙L>µú»a‹Öć_×D=ßÇrMŹűüĄŹőů˘0Ł…|đ„ŹXćíŰ\üiłňßł.]ÔĺfŽŽ ܸß7ßÄş^=ôŚŚĐŃÓĂĚÉ‘˝{3ŕDZoܸ\ç¨l[yůÔ}úiú,YŚkëÖZX khŤW}ÚŤů€î3fTé<ž]şĐďűďđęŢ3'Gtôő103ĂÖË ˙wße঍:8hśŁaź>ô[ő=ő»uĹÄÎ==Śml¨Űůi^üv9M¬÷]ff!„x"Ĺ_ĆÍżÝ>›ÉžÉc¸r ;ÚŹü7ż6dÝIÂČŇZË%yÔmß™®38żů’#najgOĂç^ Í°Q{ŠB™˙·x űľřđ3˙’“ž†…ł+íF(•ř ËUşŢłVá?xŤz÷ăĹ…«8˛pw"Ă0±˛ˇAŹľt™řfćš[Ę‡Łš»pü/ţÜň!WÉÉÎÂÁŐŽ˝^ˇű+ĂŃÓÓ\"u+Xą„ŤŁ+ßN}ź‹˙  ?źö=đĘč/00Ľ»żË‘9şű'˘o]''+;ç:tč5^oŚBˇP0uhwn]˝»ĹŠ/G˛eŮ4ćýz–÷»×ÇŘÔśY?gă˘Ď8sh¶NnôoRąűz•íkćsĺě12ÓR±urŁcďWčůÚűčęé•zţůżňŃ‹-HNeö–8¸z(ܢĂٵ~ —N$)>Ccę7ń§ď±x7ÓÜŘodoP(ąń(?ΛħŹ``dL×ţĂč;xląîË--ÁQyîUDHź˝ő Žu<™őÓ?m¦§$óq::ĚÝz sK2ŇRŘ˝ţNÚÉíŘH ŤLđnŢš>ĆRż‰źşnaaa©÷ĺ˵űĺI‚ŚZd¨Ü<|V-łďÝ Ó˙Ýw0up îŇeŽÎ›Çíë×°kĐ€ŽŹÇŢLJ¶ŁF±cÔ(×®#píşRß@R‘¶4ĐiÂ'\ůýwÎoÜHJD†ćx÷ęEűÇŕŮĄ ;pëč±Jťç©‘ďŁĐŐ%pí:.oŰFz|‚Iדő÷eüŢx‡Ľě,ţš9ĄJ×{'"ŚťźŚ  7—&/äýçx5‘1'ŻÓó 003'ćŇ9ĎűJ>ŐŘń?¶±đ“7ÉÎĘŕóď÷˛xçeĽ›?Ĺ–eÓřeĹLÍ{žOR|4z†ś>´“ŢoŚfÎÖS´éú"~YËŽµ ŐÇnývkfŤĂÝ» 37eζS8Öńäçĺ˙ă·ďç0uÍźĚ˙íf–6¬9ĂüßąAvf®őřnÚ4mÓ™żźŁk˙aĺîëőK§ůňíçČËÍeŇŇß˙{ žŤ[˛őŰéüúýě2ĎźśKrB,¦Öę #äż@>Ü…—Î0bę·,ý#O—l#1.’ŮŁ_âÚ…“ęsߎŤ$3=k{'~[=—×ÇNcęšýčččňËĘY\=âľ÷%)>†ä„XĚ,m°s®Sˇ{ĺčV]]˘ÂČĎËÓh÷Ż_Ö•‘FŹĂ11·$.ň_ éĘß;72pÔç,Ţy‰ńó7}ë:3GľHPŕ?Ĺ®Kű}™´ěwůF’ ŁÖ)ŰÚ¨Ë,ÜÜČMO篩_{ń"y™™äefsţ<żš¦ülVľ˝v*Ű–Žľ>1.pxĆL’CoQ—OfR6näÜŹëđîŮ«Ňç±puŕĚš5¤FGS—GNZ:·ţţ›}“&‘ť’‚‰ťťúř¦/D×Ŕ€“ß®ŕÄŇe¤DDź“CjTg׬áôwߎ٧Kă~ýŞý=—0C!ÄéżŰŘ1~8±W.’—ťEVJ2·Žaă[/zě9™ĘM<őŠŢL˘gh¤ 322¸qřO6ľŮ—›G’u'‰Ľě,â®\bß—ź°ëÓQ• V=ßžăßÎ'áz9i©äçäp'2Ś Ű6°şo'ÂN­ň5_Ůő+«útŕ¶ ¤D†“ź“CnF:1ůkFëúw%;5E>ŐŘŮ#{01łä­ńłpt«‡±©/Ŕ?lŐ8Vµ_†ˇýß›„»wLĚ,č3čCNţu÷ˇöŔ/kxiř$Ě,m°°¶ăŐ¦bbn©^>p3H&ßä2âşňÍá7ţŁCŻ´îňú†F\ü÷@ąúš——Ëw_ŤĆĚ҆÷ľXŠ“{}Ě,¬0"#3®_:SćůCďŮx3';“eSŢĄ°°qó6Rżi+ ŤLp÷nĘkcľ"?/—ßVĎU׿ţź:đŢdl\ppőŔ·CŹ˘`äě}ď‹¶Í?Ë{Żô ±wń ??Ź„puyNv&űţc3 z |—üĽ<–My‡„čpFM˙żN=165§žOK^=Uy]EÁSY÷ĹČÄTľ‘$Ȩ•ת~ŁńÝgŰGĽĎ÷ťź!9´ä Äĺń&&ĺjż*m]úy«Öňëű•oŹqhěSéó$…†đLŔdŤĐ îŇeVwéĘÎѨËÜZ+—´ďÚĄµOW÷ěŔĄ•_µżç˛ĚD!DŤô ^Ézń—MĄ. 
Yѵ•ĆźsŇÓ4Îvâhą†{űYZżÓâb88ű Îţ⍶ŻÇ_˝ÂÎŹG<ň±ƨéß—(ł°Vţđš•‘¦QZ´Ä¤]ŹţYÜ]2eíŕ (g$¨›’™žĘŤK§iÖVąŢÜÁµ.K÷k¶Tr_âaBËNĎU¸Ż˙îű…¸ČPú‹~Qh`mďÄň?Żß˙üę2eđĎŢ­ÜŽŤ k˙ˇXŮ9jÔŻWT/äň١O‡^1µ°ş{°B€ŽÎýßswóĎ•şWÎ^Ƈ‚Ł[=ŽěŘDjňm^:sKţÝ÷ ·®^¤‘_´h«Qż~SĺßW7Żś»ď}© ˘Â®É7»†FŘ9Ö‘ نćdßŃ|c—‘•%M_[›Ö9:blmŤŽžŠJĽá©˛mÝľ¦ý3}'L`ŢBTä<ľü’–-Ă«GęwëFÜĺËDž:EÄÉSDRŻąą¨ą‹r®A»w•Ůg 77 3„B!ÄĂQPP ţmľ˝‹‡Ć×Tł·~ZŁ­:UŞŻŞ6ť=ĽĘfh;ż*̸uUůç:^MJÔOŠW>äXŰ;—¨ďŮDsJµjłQŹ÷­í--33*rŻ\ę*_®śB~âŻßHWĎĘ(şü´d*?-™ŞýaÎÉíľ÷Ąşňőőe×®]ňM~ŮŮ™d”Âł«rVYń׊¶3S{{RŁŁ9±t)ŃçΓuçąąäçóţ©“ĺnż*mé’—YňŢé-aÍĚŞŇy2oßćß%KřwÉ,ëÔˇN»vÔďÖ??ž™2]}.ýüsŃą2103cŐ3Ď’“–VŁďąź° IDAT„B!„5Ŕˇß~`Ýś ÔmÔ‚‘˙űwď&™°mĹLvţ°:^wS“o“X´ŚÄÜJsúrTčU­éţĎôÁ˙™>\=‚M‹?'čě?¬ž1–±s”Ô%D‡“š|K[u˘ÚřŇĘÎk{§JőőÎí8,mĘ~@Ór~ŐćźćV¶Ř9)§ß§ÝIĐaT®]î.ËPŐ·°±×8’âŁIIŚÇĘÎ K۲űĄs+[l‹Â„Š\ż2Čń .RąF~φĄę˝2TT×őíţ ŤMĘŐ§{ďKućîîN@@ĄüűI·`Á‚r÷¤6őëÓ¨Oŕîžpwť|Pb 3§Š}oTĄ-OOâ.—|+™•‡r–VZlĚëóťđpsiË÷ëGç€É´xăuuq'<{¬ęzwérŤľďf!„BÔ{6-ŕť)‹q­×P]~ýŇięxßť‰ š• ŁŁąß»ęMMÚtÖzž-žb̬uŚű?_.źţ»X›%—R¨~űo0R‘ľ™š“ž’tßë×v~ŐL ŹbeƦ¤§$‘—›®žćŹşţQľ’´UçŢőď]Tt] +7+Ł"ׯ 3”łRnÇ„sńßDܸ CĆiěᡧo \Ł Ü}*Ϭ’ęhĘ{R +zÍ›‡Žľ>7öď×ŘźB·hiYz\|‰z­‡ż …… P Ł§GÁ=oRčęjě7Q•¶šĽô’Ö0Ăë9ĺFĂĹżVŃótűß4\ýýŮóńÇ%‰kűöŃ9`˛Ć’•'±÷ńÁ÷Í·Ř7qb‰sÔi׎Nź|Ěő?÷srůňj}ďĺm&B!„5ŔťŰ±Ř»Ü}ŕ »vIN¸yú{˝¨ţďđ˙©˙;7'›“ýŽ©ą­:?Ŕ–eÓřt`[őŚ €Â˘˝,­ďţ^ôvŚâË7Tˇ‰GĂ•î«[}ĺÇE†j.c_hÎş9ĘuţzĹ‚„Ćţ4^S Ę OOţµWĎFřuęĄQżî=ýW_WćĺYJľÉ¤"×`bf•ť#IńŃěÝ´cSsz핡RżI«˘v4V.˙‹IŻu`űÚ÷˝/B‚ŚÚBßÔǦMi?v,/Ż˙ 7Wsxú Ťă’n)g6´= #+Kt phŇ„žsľĆŔÔŚ”Hĺ 6Ď®]ĐŃ×îîcQż[Wtôô0¶±©t[yů¸¶ö§ýرX׫‡ž‘!ćÎδxăuZĽöÁ»vWˇĎ Lěěč>}:;``f†BWsggÚ} |‹Éíëw7SľĽmyYYÔďÖ•n˙›†eť:ččéabgK“ýynÖL,ÝÝ103«öź™™!„BQ4ômÇ…ăqđ×u<Űo0WĎź !:]]=ňór5ŽUýfľM—řińĽ7u9FĆl[1¤řh†MZp÷ť……ÄE†˛ińĽ°]==~^ţ?€Ô)‰ńde¤cdbŞž-qď †Šôőů7Gř;Ö.ŕ­ńłH gőĚŹHOI¦eÇĺ<˙Ý aŕČĎąřď~Z2K{\ę5 2$5łĆcdbĘČi+Ń)z#€ş~Łć÷ŚźöëŇf”Ü”´"ׯâěáÍ•3Gůďôßôň‘ć›U€~ďL řÜq6,ŕíÉ qp­ËĺÓGX=ă#r˛2q×yĘß!AFMđţéSe~=ňÔ)öMšLvjŞFůůőčöżi48¦ŞËÓbbůeŘ0Ú~0 77şOźŔr˙ÖÄ]ţ—V~Ę2e1Ëý[W¸­mŰ…#™™5›^óçÓâÍ7Jô=hÇŤ}>*zžő}_Ŕ­Mk,\]é˝pa‰öółł9ľh±úĎ©ŃŃú%Ýţ7 ďž=ńîŮłDťř  N._&a†B!„¨şÁć°vö'ü¶jű6ŻäŮ~Cč3h 9Y™ŢľžŻÇ `čÄyx7oCČ•@úŹŕÜŃ?5ę˙HIJŔÉ˝>ďM]NŰîýÔ퀡‰)Ç˙ŘĆ'Ú`lj†cOŢ™˛˝îţ ݱ÷+ś>´c{&ěú|±jo©Ë*Ň×fm»0vÎz~^ţ?ĆľĐcSsęůřňNŔbőŢ÷;ń ÁÎą“—ď`ŰĘ™|ýáËädebăŕLóvÝč;x¬ĆĄő?´Ë4Ôm T*rý*.u•a†±©9Ď˝ň^‰óÔoÚŠ€ow°}Í|fĐź¬ôT̬lđíĐťžŻ˝ŻžáRÖu=L9éɸşşĘ7«]^V6·9ž«{ö~ü¸Öă®íÝ‹‘ĄÍ^ys2nqň§V¬$=.ŽS+Vb]ݶőë«ß‚rxć ž ŔŢLJ‚Ľ|’CC+Ő–nŃŁr33 űçvŚI«·ßĆާz††$‡…qĺ÷ßą´ĺç*ő95:š-ŻżAóW_Ąnç§1sp@×Ŕ€ô„˘Îś!đ‡I Ń8ÇŤýűIşyß·ŢÄŐß[[ ňňH ĺúű¸°y3ąąŐţs (,,,”ż„BÜkÎś9\ż~Ű18ź9 "Ş­$Ϧ¤x)$W¬XńČÎűŢ{ĘN×ćĎŕ޲‹ÜQ%_ŹŔ•3Gyůý)ô~stŤë˙™­óÉIO¦yóćŚ5JnčCúűĆ·]OZučýP‚Ś   őFŁžÁÁX¤ĄËŔ‹Ç.ÚىX­˙ĆËž˙ßŢ˝‡GUßy˙r™K 3B’$$BA,$´h$´*›¸ÖRm5éÚÖj­ÝÖô¶ ­»­O-Ö^žŞ­­5 ¶kh‹+ÖF-I´JRA !PČ…‹$Če&Âţf$d€0ąÍ$ď×?2g~çĚ9ß3óńűű`í~ű ˝˙Îßź0WźČůRŔť˙‰Cű=ťv»ť: ĆjGp.¦™# ««Sµ{ßÓ?ĽWćHÝýĂ'P×ĐéjÓl–$ ÂŚa@ô ĚFŔÝňq?vXÉöT}îľir쌀:˙NW›ö•<'牞§·dggËl6sc‡Đńcőzďť­€3€ń`aYŔžű‰Cűuŕ›=AFJJ ?އAMő‡Ź]&ČŔXG0§ŽPmĹ 1ŁńŕnO!őąąąfd„Ň|ô€šŹ s –/_®¬¬,Š1Ś2€„>ŤŤU}}=…Ŕ“ ‡ĂˇÔÔTÖČ&éééÚ˛e Ap ŔEuNŇ‘EË)üV—qd~L­]»–â999ĘÉɡŔ93u&$L퓦Př  €W«WŻÖłĎ>K!0ŇŇŇ(caŔ+›Í¦üü| żD @ !Ě…0 P3@@ái&\¦ššŽŘçgggËfłq#€A°}űv•––jůňĺr8a—©¨¨H{÷î±ĎőŐW•——ÇŤhűöíÚ°a$iďŢ˝şí¶Ű´téR €0€ËÔÖÖ&I 5hRtě°}îńcőęěp©ˇˇ› ĐąA†›ű5ř? 
|4):V˙ö™{‡íó^üĂĎu´~…čÜ #4Ě ´Y*ůŰ ęhwh@€`PŚçYźý˛ćŘ+ëł_VhARO‡ĆöíŰ)ř1Â Ś Ţ‚Śč8IRtL Śz 2Ü4 pf`TëOáF 0ŁÖĺnŕ˙30*ůd¸h€#ĚŔ¨3 ĂŤ@üaF•Á2Ü4Ŕ?f`ÔĚ ĂŤ@üaF…ˇ2Ü4Ŕżf ŕ eáF ţ0m8‚ 7 đ„XĂd¸hŔČ#Ě@@‰ ĂŤ@FaÎHn0r‚)`¬ÚłgŹOű9ťÎ=o§Óéóą'%%qăđü!Čps/<ý:Ú]žóZşt)7 †a`L*..ÖĆŤňÜëëëőđĂű´ďŞU«”‘‘ÁËź‚ 7 ~L3ŔG–ɱ÷y‹…‡€ĺŹA†SN`xŃ™“222d±XTPP —ËĺŮnKŻąŽ«/ąÁ(ë”řa=çÔĺźÖěy‹Őîşô4—ÝĺŰTS˝Óó:**JąąąJNNćć# ťdHR¨Á¨í{ţ‚ă#&NŇŇY2ĚvÇ×éÝ·ŢPóÉăj0ŞŁ˝çď” 6Čd2Éápp`fĆ,‡ĂˇÄÄDެ¬L’TS˝S.§–]w«""ýŻ‹áRJó‰F˝ţň3:RWíŮ–žž®ĚĚL™Ífn:VaaaŻ×-'›Ôr˛é‚ăkż$iůŤź´sřÇëŐÁ˝»/kźââb „€1Íl6+//O©©©*((PSS“ŽÔUkÓ†µ0ízÍ[”0ײëí-ÚQ˛Yť=˙Wn Ś&™™™*//ď×Řşş:ą\.ť:Ń8¨çŕîŠ2 Š‹ëßô–´´4n  $%''kÍš5***Ň–-[ÔŮáŇ›[7é`őNżíŇpk8Z«×7?٦†Cžmtc`´ÉČČč÷âµëÖ­SuuőťK\\śňóóą)0‚38Ël6+''Gv»˝W—Fáoď—#íz-L»ŢďÎyGÉf•—löĽŽŤŤUvv6Ý`T#Ěŕ<î.ŤW_}UůË_$Iĺ%›u ęźZvý­Ăľđ§7Ţş1V®\©¬¬,n ő3đÂl6+++ËÓĄQ__ݦ†Czţ©u#ÚĄár¶jwůë}ş1rssełŮ¸q`L Ěŕ"l6›Ö®]«^xˇO—ĆâôO)vzâ°ťKýÁ*ýýŻżWË© I7‹3čo]/oüĄć.ĽZŽÔëd0ÝB›.g«ĘK_ÖîŰ<Ű”——'«ŐĘÍcaýäîŇ(..VQQ‘\.—vďئšęťúŘ'o’.Ťó»1 233űýT€Ń0€Ë”‘‘!»Ý®'ź|RŐŐŐj9u|Đ»4\ÎV˝ąe“ŞwżĺŮF7@ |`µZ•źź?$]öţSŻo~Fť.ItcśŹ0€¸P—†-aľ>ţÉ[.«KĂĺlŐý˝jŞwz¶Ą¤¤(77Wfł™bśEŔą»4ĘËËUPP —ËĄšęť*¬ą_Ë®żU3f/¸ä1ĽucäććĘápP`€óf0H‡UPP wß}Wť.˝úüíŇh>Ѩ˛­¦ŕ2f0ĚfłîľűnŻ]K®˝Y‰ó{Ćîz{‹v”lötcDEEiőęŐtc\aCŔ[—Ć/?Ł˝»Ţ”=ő:U”ľ¬#uŐžńéééĘĚ̤ 3"î.ŤĘĘJ¨©©IGęŞőňĆ_zĆDEE)77WÉÉÉ  ź‚(C+99YkÖ¬QzzzŻíéééZłf AŔe˘3€a`6›•““#»Ý®ŠŠ ŮívB f0Ś’““ 1i&  f€€B a(„0Ć™L&Ďź»Çóśř§±ç{Űç= ăl6› $édd$_h‰—$Y,–>ﹼ*//WaaˇŽ?N1®˝öZ­^˝šB€Źěv»ĘĘĘÔdµ(údr:) FĚá©ęî‰,ŇŇŇúĽOgŔ«ââb‚ ”×^{Ť"Ŕdffzţ\3ݦ® ~.bd´Ť::mš¤ž)&‡ŁĎ:35ľ­EćCű)ü–+j˛:,S) ŐjŐŞU«´qăFąĚfU'ÎVBŐ^wwS ›Sáf5K’d0”››ëuaŕ˘Ć;[µ…€ßjşba ’ŚŚ ŐÖÖŞ¬¬L.łY»çĎÓŚ}ű4ˇĄ•â`ȉ‰Ń‘i1ž×ŮŮٲŮl^Çf<ňňňd4µeËukR’¬GŹjęˇĂti`H´ŤŞŹ‹Së„Ivdx›^âFč%''GIII*((ËĺRĂ”):n±hňÇd=z”P˘=4TGb¦ŞÉjől‹ŤŤUnnî;2Ü3}8ĹÇÇëÉ'źTuuµşudZŚ>M¨ńbHŇĘ•+•••ŐŻcfĽ˛Z­ĘĎĎWeeĄŠŠŠú„OśPôÇxŚ+úĄqŇ$·X<ÓIÜ–,Y˘ĚĚLYĎ 7.†0pQÉÉÉJNNîj4Y­j˛ZehmŐ¤Ćăšxň¤Â::(<ÚŚF·ô„ÝÁ˝#_B 7 @żśj”––ެ¬L’ä2›uČlÖ!ĹlŔ`śŚŚTgXXŻ÷ RSS•‘‘áSáF¸,îP#;;[ĄĄĄ*..VSS“¤ľÁFxK‹Â››ŢÜÂŁT{h¨ZÂĂŐ®“‘‘}:0$)%%Ev»]K—.”Ď$ĚřÄl6+##CŞ©©QEE…***T__/©'Řp™Íj2Ągü©fE´´ČÜÜ,c[áF€r‡N“Q-áár™Í^Çą »Ý.óĆřŠ00`6›M6›MYYYjhhPEE…ĘËËU]]íÓ:!âěâŹ1’$Ck«ŚN§ŚmN…·´°¨ę ’Ód’ÓhTKD„Z"½v^H=SHěv»‡$Ŕ8a`PY­VOdž$UVVŞ˘˘BUUUž® éĂÎŤ¦sź 8Â::ennVhGko “öĐP9ŤąLfµŤršŚ}ÖĽ8_BB‚‡ełŮ†í\ 3CʽƆ[eeĄŞŞŞT[[«ŞŞ*ą\.Ď{GOGPW—ŚmN…v´+¬ŁS!íí íč`ŞŠÜťˇˇę Ssx¸NŹşŕT‘sEEE)))IńńńĂ^śŹ00¬Î7T[[«ÚÚZíŮłGŤŤŤžE%©;8¸gŠŠ"ĽĎ|ŞY’dt¶)řt·'ěúô›şŇ.Iž°˘=4Dˇaę ˝d—Ĺąbcc/‹Ĺ˘ÄÄDĹÇÇé´‘ËEQV«UV«U‡Ł×öĘĘJŐŐŐyÂŽóC·žu8>üŻ7!íí mď™®âîđp377÷ëî 7w8áÖf4ęôřń’$§ÉxÁu,.%!!A&“É\X,–^A“ż"ĚcBkk«ęęę(ÄŕdA`Ô8żĂ­¦¦FmmmŞ««S[[›öěŮ#IŞ««ë5eĺ\ťaaž0 o‡GLżÎçÜ@dĐţÎ@ŃQQQ˛X,˛Z­˛X,2™LŠ‹‹óHŠ0Ś ………*++Ł0 ¸×j¸PACC$IUUU’¤¶¶6ŐÖÖöNgŻ…HűëÜ@d¤ĹĆĆĘh4J’’’’$ÉTH ř°âR3ŁÂ¸  ÍÍü´’>™©i)WĘd±JăĆ©őŘQŞxG;7ýAŐŻ˝<¨źůÝő´¤>03â˘ŰŕÜ˙¨ĹŘqEň|ŠŚQçţďĎ” w§‡[ccŁ˝Žuw ÷ôoâăă=a…űµ?­Y1Ň3Ď2+I7?ö´¬łűţcebÜtMŚ›®9˙ö)ŘľU›ľr»ś'ŽŹĘ:,şýKúÄ÷"HąÔ÷erŚŇVÜH!Fą ‘“4!ĘB!ôËH>•ľ!Ě´¨łtűź‹e©S‡ęTöřĎ´oëßtęH˝‚‚ĆkĘÜ-Ę˝Ssn¸I3–^Łśő›´ţÓęîěuµ:ßÁ˘BÂ Š›9›BŔ‚( ýűĎ'Ă„H(٦Ç?q•Ţ^˙k5ÜŻÓííęt¶©îť2=wO®ž˙ÚčĚéÓŠIY¨ĹwÜ3*k3Ź0Ś tfÖ¬«W(&eˇZŽiÓÝ·©Łµĺ‚cwýQÓ식8WMö÷yĆŇkôŃĽ»4Í~• &ĘŐ|RGvUhÇ3O¨ę•|®ÓWiÉ÷(îŞ4™"'Éyň„•żĄ7÷jĘ޸ŕ~ÓS—食żŰs^§×içź~ŻŇǦÓíí’¤Ô/]é߼߳Ź{ÝŽçľš«ÝEňéúÂ&LÔ7ţY§†˝•úÍőK´bÍšłňS 5™´î#Sůň |‘Ľň&IŇŽg~ŰŻu0Š˙÷Ű^·/ů⽺ö;?čµÍ4ÉŞ+–ečŠe*yô!m]wżĎç™ňé[µňGŹhÜŮgÁK’Ů­Ů+VjvĆ úë÷ďÓ;ďłßUźżK+ţűGҸqžmQӯвŻ˙·W¬ÔúU+<ĆĹřr}]®žG[†ŤJýâ×´čö/ń…~i&€€ë¸J’´oë+>cňśy=] gΨôW?ŐŻ3®ÔŹ“Łőč˛ůÚşî~ťéîVÚ—żˇig?ërEÚfęúţ\’TňčCzěšz0ŮŞG>>O[×ÝŻî®.­Xó &]Ń{ ëěde|÷uwuéĺ5_×OÓőă9“őÔęëÔ¸oʦÎwčc_ů¦$©ô±‡{-úůŔĚ=03B»‹ţäóőť>»¦HѬ…ź˝C/}ç­›;…® @Ŕ@DL‰‘$5î«ňů oůĽĆŤŻŠg×kËkŐ¸ŻJ]í.ť¨= ’GŇ;OýF7N V}Χă/şí‹¦m˙@[×ÝďYĎădÝA•<úŢřĹŹ,Çgňzíwĺgż qăǫ䱟hÇÓż‘óÄquąśŞ}k»žűjž:ťmŠż*mč®ďĚI’ÉbUŐ+/Şâ˙ ÔélSg[+_<@€ŻBŚ=ĎZďŔě¸E=@ų뽾żsÓzĆ]ąÄ§ăOO»şç8ţ˝×÷w=÷lϸĹëµÝvöőű/męłĎŃÝ;µnî=ťsý°\ß® ů˛żÂš€€ŐŢrJĆČI2L”ł©Ń§cDĆő×h¨Ż`$Đ™Xď˝°QRĎŁO#m3/9>vábÝńR©ćÝ”ăŮv˛®F’={Ž×}¬ É’¤gÇ]®¦ű%I–óžVr)'jöě7+i@5ęë 
[binary PNG image data omitted]
ceilometer-10.0.0/doc/source/contributor/2-1-collection-notification.png [binary PNG image data omitted]
ceilometer-10.0.0/doc/source/contributor/1-agents.png [binary PNG image data omitted]
/e™?7FۍƵۜ.ŘÚĆvljŘó§.¬÷üĆk;tőĘ5sů ©=azµ6pÄł±î´ő}ŰËŻýĽ|~nłžô˝cG›‰x#kµń'żRĹÉ Ź ÓKYµŹŘ¶O;­™ÝŃw¦ý;ŢöůÝÇ«ĚďyÉk ۭ͟Á,µŻSm.CbůĽĆůž?uÁ'9m°{ ú7–Ĺ@ŚŃŔÁ““Ň&jXěP}^k¬cě› ś»č)ť(ý˝.]¬RÎ÷–kú¬T 9T'ź27Ç›»č)źMěŚÄâ'Ţjm}­Pá‘aš›ó”ϦwÖD®ÓÚĆNÇ9TOd=¦…µř{Ë5wŃS ¦Ňß–™ Ü›–šÇ§=™ŞŇýe:PxPwčMJűŽ.]¬Ö‰Ă§Tqň‚ĆÜožť˙ůË˙Ą1÷Çë‘ď~G÷Ží¸žô’×jůłk´{ó[şä­VÚ¬T576űś×ş+}–(±[çÚúůťF€űÎŤÄúÖu…şä­Öđ‘CuőĘ5óűúáĎú”çűă¦ćĆ­]ĽAcîŹWsC‹N>ĄđČ0ĄÍj-?ëFŹ÷Ž­'> ó§.襬UÚ¬TMJűŽ®^ą¦Š“TşżLá‘aćfô$—ër­ˇ<+ë1s˛":bp¸~Q¸R_ú•Îźş 7,ŁTÇÜX–Z“ąű ęóÚkÚ_ř¶śř€ćʲ®ŻĂ:ĚN#‡íFőľđŇł’ZG żńÚóçN|@ ^ĘňIĆ&>ô€,ĎŇîÍoiáŰÚ_ř¶ylţoÖéýÿץ«FAIŐĄ§t˘ô”ž×:ĘŰi ŹÄ‡Đş­eeĽĆú^Od=ćs^Ćwd7bŰúůťÖcö÷±ąnrĽćć<Ą¦ĆfźQÔĂb‡ę…—˛ľŻÁáZđŇłÚúÚźó~"ë1˝đŇłĘůŢň¶÷íóş›–ę@áAłLŤrµ~Ă;XÂýÇ7oŢĽI1ô}?űŮĎ´zőjI­#ű¦hÖ‘Äţ›ßŮ1’§ĂGí¶$Łőś:ú;M ÍćĺPĎéă‹Ujnl éóÚ˝®»?ż›ňéč!őŘ1÷ÇŰnŮŃwÝŰźťóă¬5ć Ęű·ÓÔ©SŽaä2:ÍmŇ01Äu‚{ęś"‡»>'7 ĺ®x]o–Ď­$…{â»@ďbC?€k$—®‘\¸FrŕÉe€kwRýĎŐÚk:ę [47µtx Éĺ~¨ô·e*ým ×°,F?E!čqNąIF.÷łfÍŇąsçTUUEačÉÉÉJNN¶ýÝ7oŢĽIÜ`Y €k,‹ť;wNź~ú©«×DGG+))Iaaa¶Ü>,Iš6mtâÄ IҤI“( €ű3â ʏ-\FPÇŹ׎;:őÚ… :®Çr»űőŻm&kjjôü€‹ił^^Ż—ëŕţ €xGÄ#n ˙ĺg?űŮĎ(89wîś*++;őÚo~ó›ňx<ş"¤O?ýTׯ_°‰v\ń€xGÄ#nOŚ\FȢGQLÜź=¦®ćOş~ĺK*?ĆĎxâČőŔőŹńH<Ä#â· ’ËYLÜźkě¤1AŹą¨K6ąě_Śzŕ›’¤ËţH…ŔőŔőŹG€x@<â¶Cr覊`ü_=`ţ› ëë â‘xGÄ#n7¬ąŚ *++Í5—cľĄˇ-‹ńéźtýÓzIRBB€XsąŁŠ`Ä_ UKĂ }y­Ik&q=p=Ä#@<Źń€xÄíä2‚"ą|k ®€xGâ ʏ]‘\FP$—o˝" B aŔőŹńH<Ä#â·#’ËŠär×TT4 ¸â ‰G€x@<âvCrA‘\€ †×@<Ä#ńŹńH<âvBrA‘\îÚŠ€ †×@<Ä#ńŹńH<âvq'Et®"¸ó®;>$LŢ÷«4hđ˙«÷Ćč˙ą»ăúěăkj¸Ö¬đ!aşó®;őź_˙§$™ďýü€Âîç×ĂŕˇáqďĐއëŕţ ŹÄ#@<Ä#ńţŠä2‚ÇűT’ôź_˙§.ž¸dţűáŚDŤ¸7&čű\ţ˙>Ó™w.:ţţĉJHHĐäÉ“)ô>ěřńă]r=|öqť>(:Ďőôxäţ Źýâ ~Üű3Ščőź7txLsĂŤŹ Ł0ČőĘ1¸?Ä#ńŹńH<˘Żbä2‚iÓ¦iĐ Aş~ýşĎĎ˝^Ż>účŁN˝ç}÷ݰ&utt4ë$ő“'OÖŤ7ÔŇŇÂőôx”Äý Ш‰Gô’Ë€Ë Á_g+ŹÇŁôôt ¶7ŘąîĎG€x@Şm9ąüŤ»C ›TÖ6\öò×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\»“"Đ_—ťUyeŤŞkŻ«úł:IRĘŹ’┞:ŽzÁ+E’¤¨0ĺĚIsőÚŞÚ:íúÝqIŇŁă=JťŕąĄs±ľß3˙m˛FÇĆđá¶ŐŮř),>¦ËĽ®QߌVVú 2„ű[WÜź®+Üę\vNĺ•—UQY#IŠŁřŘč®é[m«•ťöę˝3^IŇËŮ·]˝ţŢ™J۲MOIV’g ú,’ËúE#|gÉqUŐÖŮ60$itlڶ®śO#˝v–W}cłĎ™NaŔµGΚ ŕ™©É®: ů{Jµio©$é䮕·|.Őź]×Ú­íťv’˸ťYŻ÷¨Č0y¬STdXH÷üŁgĽJďéňär}c‹r7îSÁŞů·Eĺűrví p]a@«olѦ˝ĄćőëËkţ_RBś^ÎÎPĆÔqÝŇV;zÚk> é+ÉĺúĆ˝RP¤ôÔqťŠiăőů{JËvíÖ"ĄNđhýgM2oÚSŞGÇ'FŹcY }ş3ăĹőZ»µČL,'&Ä)=5Y/gg(eĽGń#˘%µ>éťńâzŁŕ\ńâz=żúźTßxÂ@§,Îl­\\vÖŐkKŽž3ăšF0pkőeöší˝ze§˝ň<±\…%ÔĂp;)÷^6űd†Ä„8Í›99 OV^YŁďçnÖ˛Ť{DŮTŐÖÉóÄr‡Äpčý1ëëSĆ{”2ŢŁôÔdĄŚ÷hHÄ łžť±0OĺŢ˶çńĐÜŐZşqŻę›čסç1r@ź5ăĹő*o™žš¬Ů¶ ¨ü=‡´lă>IRöšíŠŹŤa$‹dp+2¦ŽÓ˛Ť{őeÓ mÚ{8äđEGΚŤ¬ jťStä¬ŠŽśu-ÖÝŽžöŞľ±…/n#őŤ-š±0ĎĽż§§&+oI¦íăÂâcf›0O©ľlĽŃĺ3YR&xô˛úÎrŐź]żĄş/Ď!źţî¶UĎĚBŞolQţžR˝RP¤úĆ=ýŁ_Ę{`]ŔyďôF.č“–nŘkVófNÖ[y9Ž#Ď™®‚•í —˝Çşn«›Ť^^ˇü=‡”:Ác>1·26˘đßptlŚćÍś¬śĚ4ŰJ}Ć‹ë%Ië8[ń±1ĘݸOEegͧřŁcc´ţ‡łÍŃ—ĺŢËze[±O‚;)!Në—d,×QX|L»~w\ާ K3ÍÍŚ÷ŽŠ Óâ9ižŰ3˙m˛ăĆLK7ěŐůŹZGżł%7ŕg’´ëwÇÍť—Ťc:*·¤„8-ž3]ófNć˘ŕ’<Ł?"ZŐź]WÉŃsÚ°43čńÖŘ7sŠccűŐ‚b=ăí’ë.”X1âŃ?Ę˝—•űú>óçe§˝zµ ČgY™Ś©ăôňóéć ‹ť%ÇőJAűzńQ‘aĘH§őKf;vĘN{µio©ĎňQ‘aJ™ŕQNfËý H‡sşľlĽˇŁgĽ]˛<ĆÎ’ă>SuŐ•F|TvÝ6ŢĘ+ktţŁĹŹů˘ô IDAT±ť]ßآ§s7K’YÚÝŚYIţ›•ťöjWÉqźzŮI§¸ńŹée÷šë]†şA°‘Č˙˛íov´ÉমZ˙Ă٦W Š}Ö1÷ß,-”ú(”şĆZß{­]›ĐZż9ëÓ® µ-N›˛oĘßsČü˙Ľ%™®Ú…9™iÚ´·µO¬Nrş†ťîßNmµPę0cßž`I\cSë†çÎuŠQŹXŹÉݸWQ‘aŽ×0ŐźŐuxĚčŘĺ-™­úĆJ±”ÍŚ×Űž‡]ík˙:3µ­­i÷=uTgZűÁFůď,9°üâčŘĄŚ÷č§Ů鎣ߍ{„SxíÖ"ť˙¨&h{Á®˙ěóˇk\Đç9çS‘‡*u‚DZ3X§ôKźJĆZ ­ÝZ¤ť%ÇőĎ÷× jŁb,ݬ±}ŹŞÚ:}?włň–ĚVĘxŹĎşdć߯¬ŃŚ×ëť-ą>çXýŮu•ťöęćM项«ÍFĐAú˛é†ę[´vk‘ŠŽśUÁĘůŽçöčxçNpEeMŔÓm˙źUŐÖŮ–Mą÷˛íç1>Óó«˙IEe­çĆčÓžÜJÓ˛ŤűĚ™Áb×:3Á©ăŕß`÷żîň÷Ň;[rCľîB‰#şM7Ěźł…St䬎žöĘ{`ťr7î ŘÔ¬ľ±E…%ÇT^yY'wŻ ,“=ĄZjłůŤŃ)3îN‰q `Ő|=4wµľlşˇě5Ű•2Áăúľl$zíâŔ©®´Ć‡]ĽŤŽŤn‹yŻíĂ•Ł–‘qĺ•5¶ťĹⲳ*;íŐA>ő uĎ»,:rVY3§$µ­çĽvk‘ĎFJUµuŞ®­“‚$—Ť ‡Ť:Ű®~:ø.Źžńę•‚bŰöä÷s7ëť-ąŞ®­ Z}°ke@'{ővÇM7­őÍ›ëÔăţ×˝ýVTvVŹŽO0۵ťiSş­ŰŃ˝ŠËÚ7^vJ:Éš9Ůlď•Ů· w•·˝µ{śÚjˇÔaů{Z“Ćďüj™í=»čČYÇ奌sĘ[2[‹çL·­űŚŘ°»ţť¤Ś÷č诊ËÎiÓžRĺĚIë 
˝=Ýńľávík§‘äĆ(ő¬™SęjëgÍßs(°ÎlKŚ;Ý#üű›Eegmż§{„µ?|ł­kWĆÁľ?ëçëęuŔŃŠe1ô9FŇ31!®K—Uµuš±0ĎLžľśťˇ«ďţľ:µMWßýsttUmťžţŃ/׫Ě}˝5qörv†NîZ©«ďţ VÎ7wđ}Ą X3ćéćÍ›*X9_Ţëä=°Îgôµu€˙g.ݬQbBśĽÖéóË×W§¶)oÉlł‘đôŹ~Ůeeś·d¶Ď“ěy3'ëť-ą>?3Ę­ľ±EC")oÉlłÜ¬źË¨Č1°YG [×S¶‹GŁŃąŘ¦]î˝l6 ‡D RÁĘů>ם1ýżĽ˛ĆíŘ“˛×lWbBśy^'w­4GZŐ7¶čágÖ¨°äRĆ{ôΖ\]}÷ôΖ\s6EyeMŔ:y…ĹÇĚÄrbBśŢ\żH_ťÚ¦ŻNmÓÉ]+Í%˛×lgÉ8cNYîěňÖNyNfšNîZiÖ•F}ç_W&%Äéť-ą>#ŤúdŢĚÉćő+µ&’uŠë×m7ޱŽÔjÝ8j_‡qSXrLK7ěuüĚŻiHÄ ĺd¦éĺě %&Ä}8f—X桺ڲŤűtóćMĺ-™m¶'Ť6ˇÔ:21{ÍvĹŹÖ›ë™ÇXëŁW Š|ŢsíÖ"3‘—žš,ďufĚx¬3×s5ÚĽv÷ë~(ĆýÁ·úĆ3i7–¤ŃËŮćß·¶ĹË+khSöÖ{q’ÍŚĐŽX‡NÉζëŃÚÇxsý"ĹŹîT»'{Ívźe<Śkô«SŰĚ:Ě˙Z´~Ţď·­/mÄ•µN1ÚqË6îSŃ‘łfÝgŤKŁeýYG¬máĄ÷ĘóÄr-۸×Ő˛Nçaí×YűkvźĎh_–S®ĂC[Łżk­3ăGD+=u\[Ů´ď™dŤq˙żQßآµ~÷'cŁBă{1î}_ťÚf¶ŁË+kŻĄŽúĆý­Ł6:Źä2€>+*˘kF-;ëťŔ Ú§C$VŐÖiŮĆ˝Ž ăĽ%łµbA†’<٦¬ô)ćäúĆÝĽyS'wŻRVúŤŽŤ1;űFcß©Ń-µ>ą>´%×gd€ułÂ®ÜĽ!É3ĘgÄhü€‘ßÖr{3/G‹çL7ËÍř\FC¦čČYÇQ ńŇuní`·$†Ńˇ1H‡¶ä*+}ŠĎu÷V^ŽŮ -;ííń MüĎ+É3JŰV=gv„Şj딞š¬Co´ÎRŠ SęŹŢ\żČ|ëňőŤ-ćgNLÓˇ-ą>‰­$Ď(z#×L”Ůuřkťa\+EGÎşŠŹÂâö)¬/gghĂŇL39`Ôw‡Ú:ŞĆ0ăw©<ŠŃ^wőÉčŘsŮÉţÁSÉQßű…‡Úš°ĘhëŔZă&~D´cÜŮM{Kmgç1]Yô mXš© 2tj÷*ÇŰ$–ѓڶ—Ńž´.Q^YŁřŃ:ą{•2¦Ž3ʱÖGţIciĘxOŔ¦lŁcc´ai¦ůţUµu>É7k;/oÉlm[őśy0â-Ř’Ë6î5ßďť-ąZ± ĂüűF[ÜÚ¦äAjď«oşáÓOč Ł>rÉ*Io®_äÓÇČ:N'wŻ2ŻăPŰ=Öë&'3-`3xkVßŘĐßł¶?Ť¸ň©S¶äšçTXr̬űÚ˙FbBk˙ĘÍL–Ś©ă|+őëôÖëî‰Ď롹«;L6;ť‡µ_×Ńç{+/Ç'ÁěÔŻ»yó¦mÉ5ëĚʢ_htlLŔ~*Ö·ţ #IďßWČßÓ^OׄńúÔ ­}ä” łŤŹĆçóď?lXšiö‚µ Đy$—ô­†ŚĄ!;$rP—Ľ§QŃĄ§&;vłŇ§ŞSblHÄ Ű©HÖ‘TFß_(Ť˛<‡µXłŇ§Ť™ť!l–ÖUßń·‚­őşxÎtóÜśFecŕ°&~ś’ZF")=59ŕz/÷^6ł‹çLwlśŻXa^wFgą'“wvqj]ç}…ÍZńŁccĚs¶ŽŢÜYrĚĽď­˛ 1˛… KŃ‘‚UíťTcĆM(¬ËŐ8mÚ”äe&ÜÄž1Şé˝3•>?·.ÉdŚ*˛.Ťĺ/Fg¸čČYËč¦LǸ±®j}°e5oć”fIYËƨ(Ëč.)ăí—złŽ ]<Ç~?Ł]jMčŐ7¶('3Mé©É¶ł†ěŢßúzc„iüh۶°oƽǩ->oćäÚ”NKw çT÷@ň-=5Ův¦HTdĎĚR»-ţ¬×h(uµżWU[çÓţ´‹«¨Č0Ą§Ž šŕ쬬ô):ą{•ćÍśCĺ•5f˛ŮóÄňNő‹Žś5ëÚ`uć¶UĎYęLű~]ĆÔq¶íóúĆ˝śťˇ”ńźŤľťú ţŚu´]N#ÂýűNK¸Xď}NmtÉe}е˛ű˛ńĆ-żźĎć©Á×o¶&ĆěžÖ&%tüşłOöăGD}ĘmŚđ쩝­ťůŽ652’jç?ú” x€Ë:.čŲÓí›kdŮŚZ¶^ßm{ś;$ă:ż=ŇáwëýÁ)–íîÖó6ßú;f ˙ĺ1„8ĹܸS:¸çÉ'§%,ěő˙úţF]cM¤ů×sĆ˝Äú@Ęç^$nŚÍ$™›Ö:ť[0ţ#–ßĚË!±Ś^©küG'†ZףßĘËqŚ™úĆݡ;lg<JŇ–ŽŠ łm+[묎sĆçö…Ţí“u[»1ČődŤPú?Öőˇť»q Zű{Ö~O°:pĂŇLz#WoĺĺtKÝ˝mŐsúüßňőΖ\3QkUU[§çW˙“˛W»[:ĆÁꮨČ0KťY´üśÚ‡ŢČulWŐÖé˛eöžőçFŰ Xťlť ĺÔöýY?_E÷6ô0`Äw°…ő÷_Ú¬»ÝáßčĚšdN'Uµu®7ŐpËšđzĄ (č5cŠ?Ó‹ µ>yĄ HĹeçTßŘâÓŔßiUb׹­·ÇemâGDkHdX—’°ŢĎhK÷’ËúśôÔqÚ´·ÔśrëfDâÝźWRBśćÍśěłv›[)}¬ĂhM’wö3u–u§a #Łcc”žš¬â˛sÚYrĽ}„˘eŤT§MÜ$¬Łz:&şCbBśĎú°ÁDE âBCH±¸bA†–mÜg.ŹĘýüĺěŚë@7łuRĆ{´łä¸™P6ÇC"™ő|ĘxŹ™\–¤"Ë4çÎ&™şb¤±‘g|lŚ2~¬/›nhÁšíú`×Ę™:t…ěŐŰÍ¤Ź‘HN™ŕi]š-!NIžQ*;í őŮŮYyv VÎiäŁqCďÖ!ĆżÎ,SbÝâVűU%§­í˘y3';bđ×ŰíÇü=‡T\vNŢŇ(ä$Ď(ĺ-ÉTvŰrWn}ŮÔ}3ü—Ź1H)›şéwµ,‹eçňŻwř^<ęlč ĎImEa4LB™ÚTßآW·[:‚i Z» Ć|Ä–i€]9J#ĺ•5AGWěÎ4d:łĽOą ^nŮ«·+{ővǤ`g$}Šąľ^qŮYź]¸ł‚Ś&ńŮĽĄim%GŰG4ş¬c\ÜÁ=˘;¤8ltfwŹ›ńâz-۸·Ă¬ VÍ7ăń•‚bŰŽŮčŘłŢí(ň÷Ň÷s7ë•‚"WËgDE†)˝mŁÎ˘#gÍ·v"­÷€ÂâöŽŞÝgĽ¦ŁMë[,Ó˛»¦^_›ö–2ý‚5©”·$ÓqÄ˝SiÄn°¬Öxł˛ÖÓ;¦ËÖn-RöęíćňZč]óf¶·çr_ßň=ż°řyoLďqLökĎřŚ|q}}kű0Řűú×aÖ÷–€=zÚ«§–mÖôÖ‡Ľˇm0S“}ęÖúu–äo¨Ë1Z?_GmČöÍ;“]}–˘#íĺ,±l׾·¶AŢ ˛rą÷˛í5č¦˙đýÜÍ­ł8ŠŹŕ]Śä2€>É:=üágÖtŘyĚÝŘŢŕ™7s˛Ů‰Š 3ź–ď,9îř>ĺŢËć& Öé{ŇÚ­EŽŤ َcm„ř6ĽŽŻíĚ”ŕ$Ď(ł‘¶ëwÇ“e§˝*,9¦BËÓjŔčqg$«†D ş¶ťőÁҫۊŻÝÂâcć5™âhăzvę—ťööĘ5lÍł`Ťóîßů{JŰFž–vŰÚî¸=ËcőĄÓýÜŮňĘÇNW}c‹^)(nY|Ě1aŕ»FRř˝3^3 eíôZwrĎ}}ź¤ÖQHţŁČ¬ë/ÇuTŻf¤vÝşšÖ„ý‚¶ŃŘ@WU[gîŤŕĎX⪪¶NË6îµ=&wă>Ç{×Ĺeç‚¶Ĺ_)(RaÉ16‰î#˘"ĂĚDa}c‹žÎÝÜaRµ°řąlĂA*X5ßńŘ]ż;n{˙´˛«ě÷řŞÚ:ÇDm}c‹r_ßP‡%yFů´?ťٱC=§PÚĘF]˛lăľž­1Ň^.Ö‡łÖ¨ţłqŤMu%}płvk‘ů}teťimk;m¤g +;íµ-ă»»Őţ1‹ĂS@Ü:’Ëú¤Ś©ă̤K}c‹zfŤ˛Wo÷©Ś„°ç‰ĺćT?»uKg¦™ꌅyŁŚĘN{5cažŮZ‘ťŃ+źyÓŢŇ€{Ń‘łf-1!ÎlÜ·w°“ÍϰiŹďnö›ö”*{ÍvÇš­ŠËÎęčß ß(ÇúĆ=üĚš€'Ýe§˝z:włYn/÷Rąˇo2‰ĺ•5Ú´÷°ŮîqÝUŐÖiĆ‹ë:Ćum4¬C]WĎčŘVŐÖ){µo2¨¸ě¬y-÷´Ń±1ĘÉloPűćÖd^‘ŮHďaJ1:ŹÓ;ůµxN{]™˝&pä “Fěř×µÖ‘CĆŇţI#)\nٹ޿žb•l}M°¸ů~îfź6âĆH¤Ś÷té¦MŁcc|’mN‡ľÂ:íüŐ‚bۤĎ_-Ě ú`Čhsćď)Ő÷s7ë诪jëtôLkÝUdTrŢ’ŮflÎXgۦ4Úâ’hSö±>™qż-ݬŃCϬѲŤ{}îď­›6źŐŚ×›m4Łžčh‰3»vŹőgˇîG±xNšyť/۸ϱkŚ4űýůýÜÍç”˝z»Yw9]ź»JŽŰÖ}N˘"Ă|>_öšíšńâzíl{ă|ŠËÎ*{őv=ôĚÇ:Ř:0Şřč9ź~]TdYg•WÖ|>I>mÍÄ„8WK 
ř·ě@í,9ĐÖ¶ŢoĎ™nÉ^ł]Ë6î5ď1Ƶl¦őű{ř™5Çî,9n&§‡D 2ű)č:¬ą  ĎÚ¶ę9 ‰ł¬ż|,hĂ51!N‡¶äŚ:66?X¶qŻŮ`1¦ßTvݬ\‡D RŢ’Ě.yÝC")O©ň÷”*u‚ÇçÜâGDkŰĘŔ'˙+˛3tô´W_6ÝĐŇŤ{µtă^ŤŽŤ řLŮ#"ŤŤ×Ę+k4ý…Ö \ĽÖitŰšx+ç›kd~?wł˘"Ă””çsn’ôf^ë[! ůblc\+ˇ4ä2¦ŽSŢ’ŮZ¶qźĘ+käybą’âćł|Lüh˝ą~QČł ĎIÓÎ’cú˛é†y/IJóŮxÄř»=mĂŇLUÖ:b¬ě´Wž'–›÷(ëgNLÓ›y‹¸¸Đ)«ć롹«őeÓ ÇNîˇ-ąćNôk·iíÖ"ĄNđ´.?aŮź“™¬µ&ŻŚdkĘxŹ˝‘k{_đďŚútP Ú˙í4;aĂŇL}ŮÔ˘ť%ÇUtä¬ŠŽś5ĽôDܬXˇ˘˛łŞ¨¬Ń¦˝ĄĘ:Ž?賲ҧ(o©**kTXrLGĎx͵¶é¬ő`umťdষ­zNÓ_\ŻŠĘ3ćüŰá’l7ëJňŚ hSJ hďJ­›ţѦě[6,ÍTRBśŮź0ú+NâGD«`ŐsŢŰÚaž'–ŰÖ5yKf‡ü`0*2Lo®_P‡#z­ď;oćdsFʵý™“™¦M{Kęk˘rŢĚÉ>‰WëaŁ}é_÷u›’̲-;í šD5úvvĺbôm­ńůŐ©mfťUýYťOťi׾NLÓ›ëÝ×™F;»úłëĘßSŞâ˛s÷!Ě26ľë5b´A**klݱy3'«şöşíčgë÷głqŹ˘1ŚÎ˛.Źá$É3J•EżĐĽ™“ÍQĚÖ%câGD«`ĺ|ŰŤ‡FÇƨ`ĺ|źŮ2vë;[cĐn4uęŹůC"Ťóm«žó‰ŁS^ßآřŃz9;#hŰŕVYú˛<ú:»v°Ń¦›7s˛ĽÖůŚ46â5DE†éÔîU*X9ß'v[g ¶µĂ#ścÍżMéßŢMLÓ;˝Ü‡‚~Ţë|ę‰ q*X9_'wŻ ©o·$S/gghHÄ €şćÍő‹flvÄ®+ݬńy߼%ł×ް4So®_P§{mTdX@ÝwôŚ·ËËÖh žÜ˝Ę1F¬çn¶u-ٍýëLkűÚ¨3mÉíÔĂ#ąoť)čŹ9ą{•6,Í4?Ł˙ěĆCîĽ%ł}Öj7®«m«ž3ËvÍ&ŹFŢŔx­Ńomź •¬C[r{m Ůí7oޤस¸X%%%’$Ď#Ł5vŇ Ç_Éx˛\U[g&xÝś›őunËĹ(“`Ż/•[OY\pŇü˙7ŢxxěÖëÚHÁqÔd}c‹*>jMZGҦĽ=ue2ŇÍő`]­;űŤť©SşŞşŐ˛ Ą<»łÎěĚ{/ݰWwÜ|Źëň9Q‘hô1,‹lEE†©°äąf˛ťť%ÇĚß‘8Fw©0—¸ qx;©ţ¬Nů{J•űú>Ç{ڱ!źdżŮ/z#—€­ŚÔds)‚‡źYŁ—ł3­QߌÖĺ?^×Îâăć¦Űé©É!o„ÂoÝĚ->6š‚ą­î1ăT\vNUµuz:włĎIÓ¨o¶~Ç—˙ŘşI ±ĚINfŁ’ű ’ËŔV’g” VÎWöšíŞŞ­Óó«˙Éö¸ôÔdÇÍŇ€ÎĘßSj>Ü0Ě›ÉĆŹ·“¬ô):zĆ«ť%Ç}6Rô7oćdŰM…ŃűH.@/Šíłk6Đ×dĄOQĘŹ^)(RyeŤąűŃ_7jX¤ćM»ßöwŐWµëÝuôüeľvP˙ňę,  ‡ä2€[¶»­óś“‘¤W÷śŇîw˝!uvď‰ŃżţüI‡ĆAf˙ü *>©Ó ˙®Rx ŹńH<ťTňÁ'’¤ż{ţ/ő_ş_»ßő†”ĚŠ©źÎyČń÷Ź>«ţţ]=EĹď_˛ťQ€xngF¸ĺ—®©üRť†„ߥEéI~—Žžż˘ňK×né}ă‡ÖżĽ:KCÂď’$·5@ŹńH<ně<|QőÍ_)ńžĄ<8RنEšł nŐĽicÍ™źÔQŘń 8$—ÜcJSúĂcq·Ňăóó[q·Ů8¨ľÚ@aÄ#@<Ź€k»ßőJ’ži›NźÓ¶yńó[e<ě©oúŠÂG`Ŕ!ą ŕ–SšŚĆAú#÷?ďŠ ˝ˇĺkł# €xGâpŁújŽžżŇ‡ßăóß®M I—?o$â°H.č4cJÓ¨a‘ćŽô鏌1§6pé–ŢßR,µnŢ€xGâpĂŘXsćĂ÷ë“ǬGż+éÖgĽşç¤ŞŰ’Y3Ű’dG` aC?ťfL]ňß±wŢ´űőęžSúeqE§v±ŻľÚ ÷ţP«W÷ś”Ô:µÉN €xGâծíÉ,cö€á™i÷ë˝?Ôšł śF9^ţĽŃŚ9%|b>č™ůđ=J3”G`Ŕ!ą  S¬Sšžů®oçů™ď¶vžË/Őéčů+ć¨-ďýˇVa›ţť!áwé_^ťĹ´&€xGâpĄřýKŞţĽQCÂď x 3oÚXýhŰż›ł śřTިW÷ś úwĄ'ę§s˘ŔâH.čcJӣߎ5§4Ś©MďýˇV»čŘyćŃoÇ*ńžĺd$Ľ?â ‰G #Ĺď·®}~·íhǨđ»őeó×AgŚ0 Áú»”oŹ$âĐH.č\çąmJSGŁ«v˝űˇţîůż´Yőč·cőŻ?’ÂG€x$.Ußô•ů°§ŁŃŽÁfÄ‹d$@<‚ä2׌)MFŘIĹ'uú˛ůkíz÷Cĺd$QpńŹÄ#Đ#ŚDÖ¨a‘Úúß§9·ŕďëň獝žM€x:’Ë:ŃynťŇôĚwď×Öż Ň8ř‡µëÝµą¨śÎ3@<Ä#ńôÝmɬô‡ď š¤ĘÉHŇŹ¶ý{ĐŮGÎţŚ"ŕ†uJ“˙.żţži[«úóFss#Ä#@<Ź@w*żtMĺ—ę$©Ă8ÖŤ7Ť@<Ée®•üđ»”ţȠǦ<8RنEJ’6•SxńŹÄ#Đí6UH’ď‰épsݍ»5óá{Ú^G<Ä#·H.pĄ}JÓŽ7žN—|đ‰ŞŻ6P€ńŹÄ#Đ­J>h[˘fÚý!?ŹŮń ÓXs@ȬSše$†ôšô‡ďŃŹ¶ý»¤ÖQ]ěî ŹńH<ÝĄúj‡Ö)öAăń‘1ú霉’¤/›ż’$=úí‘ú陳 :ĂxĎř[x€x$ľŽä2€%ŤŞ–˘E®^?|pŔk~:çˇ[îD»=€x$â¸ýĹÜ©8ňMĘ#n<Ö™÷G⸱,Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.\#ą pŤä2Ŕ5’Ë×H.ŕ˙gďţă¬ď<Ád›+6’ĺ-lu¶c9 E5»- W,×ün†µ $łŞFr {S$ˇăTČíĆkyKČli,WH\·3)ZsµŮ'fÍY°<ň΄Y$2[Y±R°ť:%V¤ÝâyčnuËjYňéőŞJĹýôÓĎóô÷ŃCwżűÓź/@ل˔M¸ \§FÇ˙\±çŮ3±iíÂX|ŮĹ1&p!<őň{Ĺëc®_ź‰Řý_ŢŽŠ _ţŕzČ%\†y¦ďäXôť|÷˘:¦ź/~rÜąÁőčŘŔßü™÷"˝rF€ˇŠŠŠs}@żE‡y`ůňĺÝ1]uEE\uţě·¶¶Ö óęz›pmşq=ž˙ë/÷ÚK,ŻŹĚ'™L&jjj.úăĽĐÁrMMMd20¸/®G§rćÉ›ĘĘʸ űŢŢŢĽĐ÷n\ß>ňnĽ‘3X}}ýű»|ůroó×c®žžžLoß~ÝÂřÔęŠ ×ĺ§?ýé¸âŠ+\ʏgČččh>|8Îś9^q÷Ť‹˘ď˙ďí1`­\ą2n¸á† >N®G·ÚÚÚřć7żyQËÓO?ű÷ďOo˙Ń ‹âďŢŤ<1ţÚ¸xńâŘąsçEYŔsízLŚŚŚÄľ}ű""âž{î‰ĘĘJ' .Âe*++#“É\†ÝÝÝńř㏧·“`yńeăU÷n\dőööF&“‰††'×ă,:xđ`^°|ăę±yÝ‚˘×ĺŹüăرc‡ĘE\Ź3ôḽ˝=/Xľ·aQÔ,Žt‚Ű$`>~üxś˛öďßďu.0á20cžĎ5XžěĂ´€¦˙ˇ9›ÍćË÷6˙©ďd>U» /`>räź"B™ŻŤ…“g–óšh‚Mgëł\ŠţË0»rßż^uEEŢł·^“˙YŃ{R¸°„ËŔŚx>—`ąđĂ´€¦odd$ÚŰŰÓ Ä’`ąŘbSń©Úy=`Ź9˘R J8zôč„×Ć;®›Ţ[ďblfłYíiŕMµĎr)ú/Ăě]›…í0 ?[Ţ•ůŕ‹×ÁÁÁĽ*gŕü.çd6‚ĺĽÓf¶Ü`9"âŽëN;XÎ}#ź0?óĚ3®I(0000ágöçúÚX8Á¦ţçpî×i9}–'{],ěżěş„s»6sâ›7,(úţőę‚jćÇÜŻzŕ.Ó6›ÁrBŔ Ó“Ífó‚ĺĎ×/śŇbSý ť[©ĺš„üĹą=Η-Ž{mĽúŠŠ¸ăşüţçą!605Óéł<Ů{ŐÂţËć%€s{[ŞFˇÍëÄúšüÇçźp–ó,çľi0CyoĘs'›É`9qWfâ5yôčQĎĽV¬Çy9“gNEa˙ó—^zɇi(Ótű,—RŘůČ‘#ާÂ4ŐÄ›PžÜ/I××Ĥí0Š˝&ć¶ÇčěětíÁy$\¦¬0XŽŘ°Ľ"ţ¶,žzů˝Ľ˙őť|ďś÷×wň˝ ŰýŰţ±Ř°<˙C€€&^ź_Q1«Ár"é5™üqtt4öíŰç =óîqaŹó™ ¬J)ś`s˙ţý^ˇ„™ěł<Ůkâ]™…y?Ń×Î.·ƇŤ·ş(WîűŃ“'OćM 
Ě®E†ŞboŽżV:DţćÍÓ®h}g,:ňnYÇvÍ5×ÄňĺËť(杣GŹNřEÁů–W_Q÷6,Šowź‰_źCßŢŢ;věĘĘJ'9­°ÇůUK#ŢüuÄS/çż>^µ4â+§_×1úÎXü㉱85šżüź|d⮕••‘Édś(¸6f˛Ďňd݉w\·0ľß;ţ>öČ‘#ń±Ź},ś(ń>6÷—w·^łpZ­jĆżÜYß{ţLDŚ·ÇČd2qÍ5×deÂe`Öüě—cQ·ĽbÚŹÎn`` Ż+"âŤ_ŽĹ˙ţÔ™ ë~xŃřĎËů™aˇď=&~rüě×çŕŕ`´··ÇÎť;ť$ćôâÜ`9"âŤÓo-ţĺčżýÍÓîíú÷cńčźÚ—®Ůl6ľńŤořrŢwđŕÁYéł\ʧjÄË'ÇâÇă_2íßż?jkkٶ¶ÖÉ€###yëkĆß«N×'VVÄÇWTÄ?žŻÚŮŮ_˙ú×˝Â,.S¶cÇŽĽ7ćĹtwwÇ©S§ftż555g­öČd2Ş–™—zzzbtttJëţúĚřŻ ¦.÷ť|oJÁrbpp0Ž=Şb„9«Ü«§FŢ‹šĹÓűĐüâ±w 8LCaUäÍś—¶5w\· ~ö˱xă—ci˙eżč|ĚűEÁtÚaş+ł0v?ýÁŻé:;;ăž{î1Ř0‹„ËŔ”MĄââčŃŁ3./_ľ•ËĚŞ‘‘‘Čfłé폯¨Ő 6ë–/OŻý`űąÓŔĚQą Ŕ¬ęěěĚëţŹ'ƢőńwňÖąjiÄÝźş¬ěÉý¦2ąćččhdłŮرc‡“3Hĺ2łŞ§§ç¬ëĽq:â'Çß+{Ű?9>µÉ5_zéĄp2` —UőőőSZoäí±˛·}rdjŹŮ°aCÔÖÖ:0´Ĺ`V}á _(yßÁăńÇź‘ýüŢďý^ÜvŰmΕ˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @Ů[844˝˝˝±yóćłnäđáĂQ__ŐŐŐłr É1UUUE&“ɻݫ«+=†ęęęŘşukTWW—\˙BJŽs*ă p±*.8p ¶mŰ?üp´´´”Ü@WWWlٲ%""ĆĆĆÎů€†††bűöíqß}÷ĺ…±{÷îhnnŽl6›.okk‹Ý»wçmcÍš5ŃÓÓStýóeďŢ˝166­­­é˛ţţţhllś±±¸PжĹčééI˙]Ü–Z·ľľţś¦§§'Ö­[Ź>úč„jăd?…Ë÷îÝÍÍÍń裏Ʈ]»"“É”\˙|زeK´¶¶–|Ş–€K]ŃĘĺ$]łfMô÷÷GWWWZq[jÝR÷—٧§'†††Š†Ż(ą~UUUZťÜÔÔTrýóĄ««+"&ŰMMM*–€9ˇhĺrŇ8ié0YőrŇ›y&*„Ë ŞK…¸RrLkÖ¬™µţÓÚ„Ęĺ$Ť—ŰÚÚ˘««+úűűcíÚµyë MÚ~˘««+{ě±tťęęęhjjŠććć ë>|8{챬“~Ék×®Ťžžžxě±ÇbÍš5ŃŇŇ’®źkEEEěŢ˝;˝żpýÂcîě쌮®®ŠńŠâűč%ëçVBg2™¸ďľűňĆŁżż?:;;ÓçZxL„ô»víš°źdÉ1­]»6š››‹íÉľęë룩©)úűűc÷îÝŃßßÖçÓŐŐťťťéşĄÎ Ŕd&„Ë…}[ZZbďŢ˝ŃÖÖ6abĽÜŢĚ…áňöíŰŁŁŁ#""ŞŞŞ˘şş:^ýő8pŕ@tttġC‡ŇĘŢŽŽŽ4XŽoiqŕŔŘşukz{÷îÝi`ÚÖÖ–VWGڦ]]]±uëÖhii™°~îńnٲ% psźÍfóŽ)""›Í¦®ßŃŃ{öěI«»{zz˘­­-]§żż?ÚÚÚŇ 9ążŞŞ*/\Š-[¶äµ"JŹi×®]yŰMĆ#Ůvooď„ű»şşâµ×^KÇ?ŃÚÚšö¨®ŻŻŹţţţřĹ/~®®®xřá‡]Ŕ”Lh‹‘„śIUnžvvv e#&NP·m۶ččč5kÖġC‡bhh(úűűă…^úúú AěâСC1DŹŤŤĹŘŘXX¶żčęęĘë]üć›oĆŘŘXZ]\¬]Fn°Ľk×®ô1ĄŽ©««+ –÷ěŮ“®?66{öě‰ń=·Zxll,·C‡ĹŘŘXşÍbŢąÁňćÍ›ăµ×^‹ţţţŠG}4ŞŞŞb÷îÝyŐäąŰzě±ÇbĎž=ńđĂO8¶$DÎ}ĚŢ˝{Łľľ>^{íµ´_őˇC‡ŇžŐ…_”2!\Nz('íÖ®]›VV L“ŠŰŞŞŞ8pŕ@^[‡L&“nŁXř™»ßbÇT¸źâ˝Ť‹­ż}űöŠűî»/ÚÚÚŇÇd2™4έ†ÎmaŃÚÚš·ŹÖÖÖ4POÂĺHCôbĎ#9ŢÜĺmmmi°ÜŐŐ•×fŁ©©) ö Ç=9Î$$Îmý‘<¦PĽ755ĺí§±±1}La PJ^¸\އr>Â…Atġl[[[Ń>ĚŤŤŤQUUůaf©ŢÍI…mUUUŃpy*ë÷ôôDWWWTUUMh!‘lcĎž=iŐoÄxĺńkŻ˝V2¬}ýő×',KžC}}ý„ű ďţţţt÷Ňwîq%Ď/©$®ŻŻź;…Ű©ŞŞ*:ĆĄĆĄĄĄ%˛ŮlôööĆ–-[˘±±1ZZZ˘ąą9Ş««‹VŚ”’Wą\އrD¤U­ťťť‘r&áhnĺn±7QXő[,¨N”j—Q¬jşÔú“µÜ(eűöí±lٲ´mD[[[´µµĹŁŹ>šö{.lÉQ*\.ÖÂ#YÖÔÔTňr'ů+ÜV©çRęąVWWGWWW477§Űiii‰uëÖi‡”-/\.Uő1‚VUUĄU»Ą&Í‹Iĺţţţ }‰' ĄKM„WŞ·q±ő“0»T…pˇÖÖÖčč說ŞŘłgOÚ"cll,úűűÓ}–x>‡łSá䊹ËJ…ËĹúM'Ş««#›ÍĆkŻ˝»víŠŞŞŞčďďŹ-[¶€˛ä…Ëg &“6 V5pÄ-*’I#&o'‘´Ë(bëm\lýÜI÷ ĹöíŰc÷îÝihťôBîęęŠÖÖÖhllĚ x“Č…űHă©¶đHö_JR%ž[Ý<Ů9ĘÝWr^úűűc÷îÝyý˛×®]mmmŃßßźŽˇ~Ë@9ŇpąXEqˇ¤/qgggZ \Ř›9˘řdwÉ>’3·_đŮú'ç¶Ţ|2żbë'j± ·ŁŁ#:::âСCQ]]ťD pKő[Nö]¬ĄH±~Ň…­D %~noĺłťŁbí7úűűŁ­­­dOéŽŽŽ’cPJ.— fs­]»6¶nÝ===E+—ÓV Ih™ŠŰoż=†††bëÖ­SŞĆ-Ő_řl˝Ť ×Oö•[˝›<çdYRą[,0ÎÝţí·ßž÷|K­›űĽ‹ÂI`ÜŮŮYt?۶mKŹ+ Š'«Řνż0𯪪ŠpN">X°T%4@1Âĺł…Śą°…ÚE|Đż.Z  IDATöbűöí±eË–Ř˝{wl۶-Ö­[===±yóćtťD2A޶mŰňú˙–:¦rz'Çśô‹ľţúëÓcşţúëchh(~řát[ŐŐŐiËŽ-[¶¤-3nżýöزeK^;Źbz{{cË–-é8• „ÓÉő®żţúضm[Ţ~†††b×®]yă}¶sTęţ$TÎ='»wďŽ-[¶ÄŢ˝{ŁŞŞJ[  ,‹’”Şú-ÔŘŘkÖ¬‰×_˝äćŠÖÖÖčęęĘkŐ°gĎžĽv‰ŽŽŽhiiIŰ>ěŮł'"Їȓő6.:WWWGOOO´´´ÄáÇóßŽŽŽ ëgłŮhkk‹˝{÷¦Ál}}}<účŁŃÔÔ_üâŃÓÓ“Cň|>]]]i{ŚÉúIgłŮČd2ŃŃŃ‘¸oŢĽ9ÚÚÚJVl—:GĹÚoDŚWI'}–sĎIUUU477G[[Ű”';(.OĹdäEڇ›I€›Ŕ–ŇÔÔT´çď /Ľ0aŮÚµkÓJ穬źű¸ä9vuuM˘'˝;::бc­®®.:†mmm“V·¶¶FkkkÚ>c˛±:Ű9zíµ×&='Éă“ó'P¦kŃlďŕběĺ{¶ęě qüŐŐŐçm_Beŕ\-0”K¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔M¸ @ل˔m‘!¦jdd$']gtttĆ÷;::/˝ôҤë¬^˝:*++ť$€óD¸ LÉČČHüëýŻg%<>›ÁÁÁhooźtťĺË—Ç7ľń ' ŕ<Ń’ŃŃѲ嫯¨öţj*ËűĎÓÉ“'ť$€óHĺ20%Ë—/ŹćććčěěĚ[~Ő±xŃŘ„ő˙yíÂX|Ů9„Ë‹#nżnaĽxěÝ ÷Ťž©7~™żĎććf' ŕ<.SÖĐĐ‘0ŹŤĹÝ7.:§ ą”ÍëÄćuůĚŁďŚĹ·»Ďä-knnNŹ €óC[  , yUÂośŽřv÷™}glÖ÷ťËośţ`™`ŕÂ.e»ł`ŕâ"\¦ĺ|Ě‚e€‹Źp¶ó0 
–.NÂeŕśĚfŔ,X¸x —s6ł`ŕâ&\fÄLĚ‚e€‹źp130 –. Âe`FťKŔ,X¸t—7ť€Y° pi.ł˘ś€Y° pé.łf*ł`ŕŇ´ČŔĚ8ú_űăčí7’¸łł3">ďm˙ĎŹ`ŕŇ$\f]©€9ůwB° péĐÎA&“‰Ĺ‹—ý¸Ĺ‹G&“™WcU¬E†`ŕŇĄrÎAmmmttt)*¬`N–™ëFFFâäÉ“Q[[k03„ËŔyU0 –™ë˘˝˝=FGGăž{î™wżZ`î.ç]CCC,_ľ<""®ąćÂśŐÓÓŮl6FGG#"˘»»[¸ Ŕś!\.ˇ2s]ww÷„0###€9Ă„~0ĂŠË0ר\€”ÍfăČ‘#éí/ŠřőăŔÜŁrfHa°|ŐqWĆ÷¸ĚM>ńŔ9‰ÎÎÎčééI—ݸzAÜqÝ‚řŮ/Ç s’pÎÁČČH´··Çŕŕ`şěĆŐ â®ĚÂ÷o —›„ËP đgíŔܰ|ůň¸çž{˘¶¶vƶY,XţôÚŠ¸ăă 8sžp –an:yňdôôôĚX¸<00űöí‹“'O¦Ë>_ż0>U[z:ÁÁÁhoow2„ËP¦hooŹŃŃŃtŮŮ‚ĺŃŃŃx饗 s‚p&‘ů‡˙fŕwlŐŞ8qŐŞŰŢŃŁGcßľ}i°üáEw߸0ę––k*DÄ»Íx||E…? f„p¦¨»»;:;;ÓŰ^qoâ¸úŠŇmÍâű›—Ĺ©‘÷.ŠçP*€r —` ĺe‹#îľqň`9Qł8˘f±P€ąE¸ g±˙ţxúé§ÓŰW]Q÷n\‹/Ób€ůK¸ “ČfłqäČ‘ô¶`Ć — ‘‘‘Řż^°|ăęqÇu ËÂe`dd$ÚŰŰcpp0]văęqWfˇÁ€÷ — ‡`™sŃÓÓŰ·o/y&“‰ęęęČd2±uëÖÝg}}}ttt¤Ë·lŮ?üp¬]»6"ĆŰĽtvvFsss´´´8aŔ9.@ާź~:/Xľýş…±yÝĂ”tuuEWWפ÷'šššâŃG=ç}8p şşşbÍš5鲞žžt_I°1.>|8š››ť,ŕś — GmmmŢí'ŽľuË+âę+ôYćězzz""bëÖ­ŃÚÚZtťÄŢ˝{ăŔ‘ÍfĎą‚8 ‘3™Ě„ăŘĽysŢşŐŐŐ±yóćĽu¦K¸ 92™L477GgggDDüúLÄ·»ĎÄ˝ ‹ĚśŐáÇ#"˘±±1‹®ÓŘŘCCCŃŮŮ9#ároooú·›HÂĺÂůŔN0cüÎ 444Ä׿ţőXĽxqD|0˙äřÁˇ¤ˇˇˇčďďŹ8kepr Óßßź†Ő“­344‘f'ár©€űlr·;•çťě_„ËPDmmměر#/`ţŢógâďŢ38•°ž-ÔM‚ŰúúúĽĺýýý±m۶X¶lY¬[·.Ł˘˘"¶mŰV4ěMöY¸ť$”Î ą[[[Ł˘˘"ÚÚÚň_QQ‘ďîÝ»cÝşu±nÝşôJÇ믿>–-[–ţ2™aKKKTTT¨”€9N[ (! łŮl:Éß÷{ߍOŐú~–|Iďă ·¤íJa+‹-[¶ÄĐĐPÔ××Ç}÷Ýă“đełŮčęęŠ^x!Ş««ó‘f'ÇQUU•7™_±VąË¶lŮ/ĽđBÚ&›ÍFÜ~űíńÚkŻĺ˙îݻӺąą9Ş««Ł««+:::bll¬h«`î.Ŕ$’€ą˝˝=/`~ůä{qWĆË(J+ЎˇˇŘľ}{Ú>#™ôŻżż? –wíÚ•W]ÜÚÚMMMqřđáhmmŤl6›Ţ7Ůd~…Án±jćdÝ˝{÷F}}}ôôô¤tkkk,[¶,úűűŁżż?]~ŕŔhkk‹ŞŞŞčęęĘŰ^KKKěÝ»7"&†ŰŔÜăS1śEeeĺ„€ůÇcqFŔL* o{{{c÷îÝîďď®®4X~řá‡Óđµ­­-†††bëÖ­yÁrDDuuu´µµĹ–-[â±Ç+şĎbq±Ě…oňř5kÖDWWW^UtîżCCC±m۶™ ěŽŽŽ´*{şýž€K‡OÄ0IŔĽ˙ţ8räHD|0ßqÝÂX|YEÉÇ~ďy“ÎuąŕuuuĄĹŬYł&:::˘©©)}lČvtt}LÔćö]Îí…śň&-)еĘ( |“m´µµM““<"Ň@:›Í¦m;Š…ÇŐŐŐ±yóć8|ř°–0—`Š*++ŁĄĄ%""/`ţŮ/ÎÄ˝ ‹ŠĚ}'ß›“ÁreeĄ?ąAď®]»Š®łvíÚČd2B×Ü^Íĺ´‘HöąyóćtŮĐĐĐY{+î·ŞŞ*ý».v\ąŰO&č+¶~!•Ë0÷ — L---±xńâxć™g""âŤÓßî.0ĎE ţrä˝…m-Î&©ž,ŚÍ­".Üg±ök֬ɫD.VÍ|¶ŃĹé×_}²B&ó€ůC¸ Óđűż˙űQ[[›¶3xătÄżűŰ3ńG7.Š«Ż(0oذ!ľň•ŻĽ9¨Tۉ©HBÜb=ŽIĹp}}ý„Ç«F. v§ZÍ|¶Ç ą ŹshhhB¸ ĚM LOCCC477§·OŤŽW0˙ě—ú+Ď7Ĺ&Ö›Şä1ąý” íÝ»7"ňŰQL6™_±Ŕy*ŐĚĹžSîýkÖ¬™ôX“ăTµ ópÎACCC|ůË_ŽĹ‹GDÄŻĎŚĚ}'ß38óD©‰ő¦*éłüŘcŹ˝ż­­-úűűcÍš5i¸śŰ/9·Oód“ůĺ[©ŢĚ…Ď©pűÉv“9W6›=§ nŕŇ#\€stÍ5×ÄŽ;ň懎Ľ? `žJ±SŐÔÔUUUŃßß۶mK«‚‡††b÷îݱ{÷îčččH+Ź‹ĂCCCEű7—Ó›ąđţÂŕ9é'ÝŐŐ·ß~{>|8>۶m‹m۶Ąë©\€ůA¸ 3 ¶¶6věŘ555é˛ď÷ľ'`žóÎÖ»řlŞ««Ł««+ŞŞŞ"›ÍƲeËâú믏eË–E[[[TUUšC‡˘©©iÂ>‹U(çöeŽ(>Á^©ŢĚ“m?bĽĘúá‡ŽŞŞŞ8pŕ@466Fccc<ú裱k×®t=•Ë0?ĐfHmmměÜą3ÚŰŰcpp0""~<¨˙ň\—Édb×®]ç¨f2™čďďŹl6›NŢ·uëÖhjjЦ¦¦ ŐĹŤŤŤ±víÚĽĚŐŐŐ±k×® ŐÓI_đÜă;Ű1'÷çÚ‰–––hllL'ď[»vm455ťtŰ„Ë0*++cÇŽy3s[nŔ{.Ş««Łµµ5Z[[§µĎ¤Š¸PŇĘ"WZ—łýţţţxýő×ŁŞŞ*2™Ě„ăLÂeUË0h‹3, Up2—dłŮhll,~÷÷÷§N%ćá2Ě‚ĘĘĘřÂľ7nL—ŐÖÖ.YIEňáÇcűöíéd~Ű·oŹëŻż>""î»ďľiMj\š´Ĺ€YÔŇŇűŘÇbtt4®ĽňʉĘĘJĂ%§±±1~řáضm[tttDGGGŢý÷Ýwß„eŔÜ&\€Y–ÉdŇĚűŘÇbÇŽ…KR2™_WWWô÷÷GÄxŻč¦¦&Ë0 —`ŤŚŚäMî÷ŇK/.ik×®ť±I €K›pfÉŔŔ@´··Çčč¨Á`Î1ˇĚÁ2sťĘeaÝÝݱ˙ţ4Xţđ˘_ź1.Ě-*—`uwwGggg^°|oďr{|Ú€ňřăŹÇÁÓŰW]Qw߸(jćá2Ě€l6GŽIo_uEEÜ»qa,ľĚŘ07 —ŕËëk"îľqa,ľ¬˘čúŹ?ţ¸AyâÔ©S€9K¸ Ó422ííí188.»qő‚¸+łpŇÇĺ¶Î€K• ý`Ę –Żş˘Â Áťťť10˙»ż=t㢸úŠ ŔĽ$\†KD励"“™Ňş+2u·ŢźlnŽĂ;wĆ©ľľY?ŽÉŽ/Y~Ľ§×‰Ľ\{çgâÍÍń·;˙­p9GCCCDD0źŻ`ľ·AŔ Ŕü$\†KĐ+O>ĂÇO˝oÉĘQ»iS|hÉ’X˛jeüÖž?ŹÇ>WĽuú´ă¬Vf2qĂ—ľd JhhhĘĘĘČfł1::ż>30ß•YźX)``~.Ă%čŐ'źŠă==%ďż|éwcă×îŹÚ›nŠË—.ŤO47Çó=tÁŽ÷˙jÜâ¤1gd2™Ř±cG´··§ó÷ž?źŻ_źŞ-=•Á©Ńź/~}Ćĺ{sÔpń.ĂôÖéÓqäÁoEíÁ˙µ7ÝtAĂekjkkcÇŽńÝď~7Nť:ßď}7"˘dŔü—˙NĽáĚ!ÂeŁŢ:}:NôöĆŠúúX˛jeÉőjęę˘vÓMéí·‡‡cŕąîíµ»˘ľ>""~őóźçm7Y>ôę«ńÖéÓqůŇĄ±ú¦›bÉĘeKá㆏źÁçž‹·NźŽ%«VĹG®Ľ2ŢůŐŻ¦Ý{zE}}¬Ľ>żźtî>Îvl˝ůwâCK–Lx\M]]\ö‘ŹL—\KV­?‡ď?·gź+ů\._ş4Ş?úŃ8ŃŰ[ôźzą/ž{®čă–Ő­O—-«[cccEÇ®Řß͉žŢíď}±«­­Ťť;wF{{{ FÄxŔüňÉ÷â®ĚÄ—WÁ20S*++ á2ĚS5uuqĂ—ľXtľľôĄč{âÉřoßýîŚôjţť˝Ń›íŚłŮ Ë˙¦u{,Yµ*ţéî‰Ë—.ťp,/fłŃ›í,şíş[o-ú¸·NźŽ˙öÝ}QąbEÔ·4ljŢŢřŃ}­ewí¦MqĂżX2śëôé8úČ#%Źí†/~1>zËÍEŹíČ·ţ,®ýěť±˘ľ~¸Dڇ˝źřĂ?Śk?{ç„í~˛Ą%NôôÄó}gB»lýúříŽ=±˙¶ď˙jÔnÚ4aĂÇŽçMöű¸Ü±ŹĽ±»|éŇŘüŔź”śĽ±ÔqÍU•••i‹Ś$`ţńŕXDś)0'~ď÷~Ď„€iYľ|yÔÖÖ. 
ÂeĂ’Ęŕ·‡‡ó–×ÔŐĹoíůó4ôxîą8őňxřżlÚËęÖGÝ­·D͆şř/Űż<ë“®»ů樻ő–2‡ŹK+v#ĆĂÔ_ťřyô=ńDŢă>ŮŇő-Íés<Ńۧ^î‹•×gbE}}lĽ˙«Ó9k7mŠĆ?} ÝöŔłĎ¦“(&ctůŇĄ%Źmăý÷§Ďiřřń8ŃÓo˙*Vf2±¬n}4ţ铎ëoíů󨩫K?đěsńöđpÔl¨‹Ú›nŠ™LüÖž?Ź˙÷űă’UĎÉ6†ŹŹ7_y%Ţ>}:Vd2±dĺĘ “=&ă÷ˇŹ,I«—ßě{%ŢţŐpú·qůŇĄyÇ•;±d2ćÉqͧI$+++cçÎť‘ÍfăČ‘#10ż9z&ţ膅±ř˛‰ýÝvŰmţŔ%O¸ sTRu1đěłéż“€đňĄKăíáá8üőťy“ľÍƵź˝3nřâ٦®.ţŮżÝ~kVʵîÖ[âÍľW˘kçÎĽ ´¦®.~÷/˙"""®ůĚynM]],żŮ÷JüÍöíiůbvĽ˘ůź}ń iZ®Ť÷µč¶“1Z™É¤•ľ˝ĺćĽc[™É¤ÁňŔsĎĹ‘ż•÷řd| +šsĎ]rÜ?ýá#úe×ÔŐĹowěI«˙óÝ˙ŞčvjęęŠVE7|íţXË-qůŇĄQ»iSô=ńDśęë‹Ýך÷Ľž衼żŤÚM›ŇăúĎw˙«Ľŕ>óŤ÷uĽČ-7ÇOđĂyu͵´´DD¤sßɱřv÷™¸·aQŃ€.u \z–Ő­Ż-ňżő·ÜżÓ±'®˝ó31^uűbç_ĄŹÍmÓđßůn^xřé~öä]Ë-±dŐŞYN…ÁrDÄ©ľľxĺÉ'#"&„Äż‘óü Ăßľ'ž;;§u,µ›6ĄcԛͭŔ=ŢÓ“ö4Nz§Çö~+‹áăÇ'ËÉř–jĄqůŇĄéą;ŃŰ[t"ĆS}}ńßůn:.+Kµ¨číť,'ç=qeý'§<.•+Ćű>żuútŃŠđľ'žçž‹˝˝ńÎđŻćĺµŮŇŇźűÜçŇŰośŽřv÷™řŮ/Çü‡ €9Gĺ2\‚r«’'óöđpüĂwľ›Ú&ýw‡ŹźĐĘ!×ó}'joşéýÇÜ4«U¨I+Śb’¶ …VżlŻ<ůTÉö ?ýÁă“ÍÍédzS5đěł±˙¶ËÖŻ/ľ'ŽżĐ+ęëó*/_ş4·ÉŽíč#ʤ•׹rű#żřp¶äľűžx"­®^˝iSŃă|őɧŠ>ö­Ó§cřřń÷ŰcLý‹w~5ś>ÇkďüLüô‡ŹLX§ëß|}Ţ_źżů›ż‹/ŽÎ÷żÜHfk„Ë0%=~_ěü« ˇmŇÇřÄ$ˇiDÄđ±cńöđp|hÉ’X‘ÉĚj¸\ŘúlVf2i ;ôĘ+“®űć+Ż¤Ďąoť>]4°]Q_KV­Šeëם(oŮúőéż>ÉżuútĽŮ÷JÚß8‘TGD\ö‘ŹLzě§úú˘¦®.j6Ô•<‡ĄüęĉX˛reYc2đěsiXĂ—ľźhnŽź÷öĆŔłĎMúÁ|ÔĐĐű÷ďŹŃŃŃřµl€9H¸ — çżóťxóĺâŐ˝ůĘ+SšH­TEpá¶VÔ×—]ů[®S/÷Mű±łh.Yµ*~ăŽ;˘fC]¬(Ńz˘ĐeeŚŐŰżšŞŻĽţý4ţ:Ąí|äĘçĺďnřرč~đ[ŃđµűăCK–¤=›“ýT__Ľúä“ńęS?š7“ůM¦ˇˇ!jkkŁ˝˝=FGGý‡ €9G¸ — 7_î›´]ç.™śnÂŘ÷˝Ă'ŽÇŕłĎĹ•őźŚő·Ü’w©Iú¦#éé|1xöŮ8đ˝Q»iS¬ŢtSÚ$bĽ˙sÍűÍ˙eű—‹öežojkkcÇŽfć$á2ĚSS©FN&Ş+·mĹů´dŐŞÚWf2i°<|üxüŹG)čWŻ_?á±ĺTR_ö‘Ź}|Ň ăG÷µ^”cţÖéÓŃ÷ÄiĎîÚM›bE&µ›nŠ%+WĆĺK—ĆćG˙ŕ\h10ăßöööŚŐ«W愆ć—7űĆ{ŻČLއxÉŞUiîą´­ ą!ď•őźśtÝ$ /Ço|öÎô߇żľ3~ú­/Öëř͜Ы‹ôdN\ľtiÔÔMţřb÷çíż®nF+Ą§bÉŞU&xöŮxţˇ‡âŃů1đÜsﯷ2VN±•Č|PYY;vě/ů˱sçNŔś \†y& Ikęę& /?zóÍéżź}ö˘{Ż<ůdDD¬ľé¦’뵟˝sZákRŐ}˘··dk‡Ë—.-\żuútÚ΢v’c»ć3ź)şüÄ „Ř-hą‘kÉŞUń»ůńą˙)ľv˙yóŰ˙ď˙·˙ÇďÇ _úbéóňÄ“.˛*++ăĺ—_Žööö0 \ň„Ë0ĎüŹGI˙˝ńk÷ ?kęęâ7>sGDŚW:_Ś˝s˙ÇÇźÇĺK—Ćoíůó Ő´u·ŢźřĂ?śÖ¶“6 +ę닎ĎĺK—ĆĆn˙ěb IDATűżZ28~ńálDŚWďn~ŕO&¬wíťź‰ú–梏=Ő×—†Ó×Ţů™¨»őÖ˘űßüŔźL‹™V89á‰÷ż¨˝é¦’UÉëoůŕK‰Ü*ěůndd$xŕ8xđ`ĽôŇKqđŕAŔ%OĎeg†Ź‹çżóť¸á‹_ŚšşşŘúý˙?éěL[_ÔnÚ׾ßâíááč~đÁ‹ňyśęë‹#ßúłŘx˙W٦®.ţ×˙ĆĐ+ŻÄ©—ű˘fC]¬Čdâíáá>~<–¬\YÖ¶ź}.ť¨î·öüyĽřp6Ţ|őŐřČ•WĆ’U«â“ÍͱdŐĘx{x8­rľ|éŇxëôéŻ˙é‰kďüL¬Čdbë÷˙CüĽ·7Ţ:=5Ć+Ćs[¨űÁoĹďţĹżŹ-Yď˙j¬ľ©!ž}.†ŹËŰÄx÷L†˙ąýµ7Ţ˙ŐXËÍ1|üD<˙ĐCńbç_Eí¦Mńˇ%KâÓüIĽúäS1đ~Uű‡–,‰k?{g¬x?tţéIÇcľ;zôhěŰ·/oBż‘‘Ŕ%O¸ óĐOđĂxgřWńĎľř…¸|éҸáK_š°Î›}ŻD÷^”UˉdBąäy¬ČdŇpsřřń8üőťqĂ˝_*;\î{≸˛ţ“±ţ–[˘¦®.˙Ź?ť8†?|$^}ňÉřÝżü‹¸˛ľ> Z#"žčˇx{x8®˝ó3qůŇĄQ›Ó9Űä±…†Ź‹żiÝ›˙ôX˛reÔnÚ”÷řÜcxţˇ‡ftL“Ęé¤j»vÓ¦xëôéxţˇ‡ŇăúíŽ=qůŇĄqígďLżíăşT=ţř㪔ł„Ëp‰řyOOôfÇ˙=|âÄ9oŻď‰'bŕŮg㣷ܜ\ľ=<Ż<ńd^PZę8¦˛<"˘7Ű™®3•ĺSÝnî󸲾>–˝ßCúÍľľôř?rĺŠń1;v¬¬ńé~đ[ńóŢcő¦›ň*ŚŹżĐŻ>őTş˝ä9ób6Gy$j7mŠĘ+&ŰdNőőĹŁ˙ň˘îÖ[ăĘúOćµý8őr_ĽZ˘byřĉô&ű;yőɧâř =1RdťĂ_ß˝ĺćX‘Éć–,É›ĐńT__řĎçÝ?•ăšoFFFbßľ}ńŇK/ůŹs–p.Ç{zŇÉřfĘ[§OÇOđĂřé~xÎÇ1Ůń˝Í–µ|*Ű­©«KCĚ·NźŽgź-Ú&í#†Ź—Č÷=ńDZ]J±ç°˘ľ>í›üÖéÓE·‘ۇůť_ źÓ1ä>vlJă:Ů6Ďöw1ťż›ů¤XŚő5Ť]ß{ţŚ`Î.—¤îýR¬¨ŻŹçž‹®óő˘ëä·˘8Ő´7Ţ{o,«[Ż<ůdt?ř­łۉzśĐ9˘XŚ›7,[ŻY}'ß3@Ě) p)J' Ľé¦˘ýkęębăý_ŤńţËSiE1cÇÖ÷rDD¬ľé¦Xů~č\+3™ř§_¸'"Ćű/k#qé‰öööĽ`ůĂ‹"ľ´qaÜzÍBŔś¤r¸$ý¤ł3Ößrs|hÉ’hüÓâT__8×l¨‹š÷ű/ż=<GJTĎ–;˙*j7mŠË—.ŤßîŘ3é±u?ř “y‰+ŐăîĹâË*Š>fttT?f.yÂeŕ’ôÖéÓń7­ŰÓö5u„¶‰˝˝ńü·:ď•ÁĂÇŽĹß´nʆŻ}-–Ő­/ylÝ~«ě‰ą¸LÖc2ŃŢŢn¸¤ —KÖ©ľľřŃ}­±dŐŞX™ÉDĺŠ1râDďéą Áí©ľľxü¦®.V\ź‰Ë>˛$"Ć{?żůĘ+BĺKÜČČHěŰ·/ŻúřĂ‹"îľqaÔ-/ŢqŞT3pá-[l `:„ËŔ%ořرč»HĂÚS}}z*Ď1Óiqőqűu ăĹcďD¸,ľ¬"ţy­9`:„Ë0EÓm‘ŘĽnAl^g.]ćá2śĹtÚ`Ŕ\'\€IL· ĚuÂe(á\Ű`Ŕ\&\€Ú`ŔŮ — ÇÉ“'ăĐÎB¸ 9ş»»ó‚ĺ«–FÜŰp™€~Ű 92™L,^Ľ8˝ýĆéď=&Fß38C¸ đ˙·w÷ŃQÖ÷Ţď?BfHH¨™Ü;Ă ÔLRşĎ!D U1±ő„Ą]w3¸V%({c·˘ µ•nZ» ĹLö^[ń),Ľ!BŮ ať˝) ʉI4î&؉BA-çŹáşÉĚ$™$§÷k­®5™™ëş~óť‘&ź|óý~ěv»VŻ^­¤¤$óľ?7^Ňż~đŤţ÷WĚ—h'>>^«WŻÖřCóľ3çĄýŕřüď.Ö˘E‹ôłźý,`LĆ»Ë D¸ @‡śN'c2p€N0&€`×SşfѢEJNN–ŰíÖůóç%ůĆdÔśţ»L®¨†uxüźü»>jü»ÎM- Ú.ňWL€ˇŤp€c2^}őUť>žÂ,Âe®˛ŞŞ*™]ËçĎźWSSá2`@#\ŕ*zď˝÷´sçN t—¸ ęëëĺv»uňäIŠ”—čeˇş•'ĹI翦ż|u‰ÂezI¨nĺď\/e'×̉×icĹ× 0h.Đ öíۧť;węüůóć}“⤟¤Ţ ¸(ę|—čÓ§OËívëÓO? 
¸˙žÉ×);yxŘă:¤ęęj €!Żý÷ŇÂeş)T·ň˙=L?q×?ŚÖ᱇˘€€Ťp€µµµéwżű]ÄÝĘ·%Wí™o) FRĚu@—€Çă‘ŰíîV·ň̉×).jN¶üťBíÜ–x]§ßSč_—č‚¶¶6ÉăńÜÇ„aĘN®¨şöMđ­¶aşŐ6ś‚<Âe:Ş[yL”ôçp9âůł=ŔĐD¸ @mmm*..Ú|/Ňne#ÂeB¨ŞŞRQQ‘Nź>mŢ÷ťëĄ‡@·2á2AŠ‹‹µoßľ€űnIđmÚG·2>„ËřŮ·o_@°üťëĄź8Ż×­6Beüńw˝tŕoßHń^˘´Cç2ĐšďM¦ŔwqÄČžçťwJ’vîÜ©óçĎK’vW}«cůV?I˝^˙0šf$ÂeřikkÓÉ“'îóßČ*R§OźÖ§ź~p_RR’˘ŁŁLMZ­V>ŔtçťwĘétް°PŐŐŐ’¤żxĄŤß(;y¸fNä \†$©ŞŞJżűÝďĚ.˝ŢpčĐ!:t(čţgź}Vv»˝ßÖ"..NgÎśáC B‘üŰŻ•+WjçÎťzď˝÷$ůFdĽűń·úsăßő ózĹEQSŔĐE¸ Iҧź~Úi°<â;#:=OWžăńxúu¸ĽzőjŐ××óˇ™čččnýŰsß}÷ÉétĘív›ÝQsú’~}ŕkşCá2$ůţÜăńŚĹ¸aäő=v”$iTL”'Ťíô<‰“ĆŞĺËŻt®ĹTŐtN__řĆ|<))ÉśgÚ_EGG+99™“Ýn×ęŐ«Ăv1/™6\Q70‹0´ »téŇ%ĘÉ7sů7żůM@Ŕś6űfŤ›šŘ­óťř¸AGö|b~ť””¤§žzj@Í\€öęëëş%é;×K?q^Ż[mĚçżľ¤˙:yIű†:˝ĄşéďŞ9í‹·~üăëľűîŁ(Ŕ5B¸Ś˝0,ě˙VîÜąS˙ůź˙p˙­¶az0%|óĎ˙®w?ţ–W á2pm1(˘ŁŁőÔSO)))ÉĽďČžOtâă†.ź`ŔPř·rѢEzňÉ'gŢ˙çĆKZ»ďýą1ôďm?j X®¦řřxŠ\Ct.#¤îv0,Š˙^†ębţAŇ0Í›ŘĹĽ±âkŐžńÝž>}:ßř˝Čn·ËétRŕ"\FX‘Ěˆ˛ŞŞ*ýîwżÓůóçÍű⢤ťĂĺ÷ýˇ¸üä“O˛y(`@c,ŠdDÁ2€ˇ.99Yżüĺ/•’’bŢw漴éĐ·z÷ăouţk~— \č\F§:ë`&X€@ŹGn·;¨‹YňΝˀŹÎetŞŁf‚ećt:Cv1ź9OmťËč˛PĚţ– XEE…Š‹‹ş%:—ťËč˛PĚ‚e-==]«WŻÖäÉ“)`P!\FDBĚËбřřx­\ąR .TTT”’’’d·Ű) `@c,şĄ­­Mn·[’ärą–€!†p1Ćb"F¸ á2 b×S‚áôéÓÚąs§Nź>M1ô‰řřxÝwß}ŠŹŹ§€py đx<:tč…Чěv»îĽóN ‹1P´µµQülú :— ¸Ó§×Äx ×Ć™±ń:Ă( ĐáňtĂ…‹˛´¶Rׄ×jĄ c1ŁsŔ óíđá:%I:7š®Űž:gąRĂęęj˝÷Ţ{e€›>}şâuč!ÂeÂĹ‘#Ő3Z­V«Zbc)ČURUUĄŞŞ* 1ŢǧžzŠBz„pŔ€öíđájLLÔ_n¤@}úé§Đc„ˬS˙#Q§ľű]ýýúŕm˘ÇŘ4ü†‘ŠIśHˇ€ËN+Ł€^C¸ `ŔůvřpťLJŇٱ3c­7Ž×Ť“Óc›¨‘Fcí.zá2€ĺŰáĂUý˝Éú[t´yßQ1rdĚSŚŤ.e€k…pŔ€*X¶Můż4ń˙Ľ—â\c„ËŚĆË“fĚŐŤŽT Đ®Ł‚“IIj‰˝2G™` o.č÷ÎGE©)áFó뤔L‚e€>F¸  ßűßö$óöűŮť?¤(}Śp@żÖj±¨Őj5żžřŹlŢĐ.č׾ô‡ńÝINŤ´ÄR€~€p@żÖj±·‡Đ.č·Zbcő÷믗$ŤC×2@?B¸  ßj‹Š2oÇŮo¦ ýá2€~ë|ô•p9&ń& ĐŹ.č·ľ>ÜĽ=|ÄH ĐŹ\O 0Ô××kßľ};v¬~üăS ‡—ô[ç¬VóvŚm"ôČľ}űtčĐ!IRSS“\.Ez€±šššĚۇ’Űí¦(@.`H"`z†pC3Đ}„Ër˘ÇŘĚŰĚ@÷.`Č™đŹŮúî$§ů539Âe IŽŚyĚ@.`Č"`şŹpC3Đ=„ËňČ."`"u=%€ľs˘ú¸ÚZż űřX›]cíWĺš‘ś»©ˇ^MŤőжŚÖ¸É·÷W­$MIMż¦ëió¶čDÍÇA뀞rdĚ“$ýµÖ#É0K’Ëĺ˘8@;„ËЇ6ţ|±šę;}Ţě…ŹhÎCO)ÚÓăkľ^𜪎VhÉ3ĘH\ÔĄcöoŃŢâ­šóĐJ3Ě=Q}\ë–ÍS”e´^}˙ÓkĽž­ÚńÚzÍČ^¤‡ź-ŕ W0]C¸ }¤ÍŰbËvÇÔÁq›·Eő5kOń]Ň%=¸üĹ_·ŞÝĆ'Ş?–$Ť›<5Ä}·ôĘzüĎÝůzŽG| D‚€čá2ô#ś•¤Šö…}Ţží[ôĆ+ĎiońVÍ^řHŹĆdˇl”etDçą_ ’§¤¦kŐĆwzm=‘„Ô9KVjöÂG—\UĚ@ÇŘĐú1Ż8ą“âŮ‹1owe„FGşÓm\yÄ·ÎöôŘD»¦¤Ą÷0\î^÷ó¸É·hJZzŻŚ €Ž°ÉťËĐG*»1˘­µĹĽ˝ăµő’¤»ď_˛¶y[´÷Í­’¤9­ĽrÍ#WFb´y[´ăµßčDÍqŤsܢäÔéJ»#;čšWFPŔĆőýĎo\űŕîbU­P[k‹ĆÚěJNMWĆ˝‹ÂžŰŃQy¤BwoWSc˝Ć9nŃÝ —…×'ŞŹëč‡ĄŠ·ŮÎéżž6o‹Ž|XŞŁ–Ş­µEi·gkFö°atĺ‘ ý°T'jŽk¬Í®».ոɷ¨ňH…Ş<Jv¦kJZú ˙Lž>}ZĹĹĹňx<ü řˇŤpúH}M׺vŤ@Ř˙ą'ŞŹ«d[čp×÷řÇ*ٶ^vÇԀǍk65Ôëźî˙Gµy[Ěkě)ޢ٠ŃË_:—8ŁŮ¸~”etPx˝ń_›ç5”ďÚ®˝Ĺ[µjăŰoŐQ_@36Ń®uŹĎ x­ĆšÖş˙PŁ#”jÇkëu÷ÂĄ’™ŻÇx˝©·gm”Xy¤BG>Ü­Uß XW›·E˙eqŔuŤő.y¦@'ŞŹkońV-yfhlčńx–0`ŚĹ€>ĐÔPo°u.·y[ôĆ+ĎIňŤĎ0şxŤŔ7ÜHŤĘ3’}Çů:…îŢ®»ď_ŞÍĄU*<بžđĘ{Š·­F í.űw@ű3‚ĺžxA…Ux°QkÝ”Ý1ŐH_î.nżž7^yNçľjŃăż*TáÁFm.­2_[ů®íťľ6ă<ŁFÇhÝăó•ěL×Z÷Ux°Q«6ľŁ(ËhU©xmmŢ­{|ľ*ŹT(95]˙úÖ¬wŰKËuôĂŇNߣÁ¤­­Ť˙81$ŚŁŰÄŹcDÎečF*…î\nó¶¨ŇsH;¶­7ź›ă×!|eTEčĐ3ÔČ ˙`uÎC+•łäĘůf/zD'Ş?ÖÁÝŰUľk»9˘ÍŰňZˇf%ź¨>®6o‹’SÓćDŹ›|‹r–¬ÔĆź/ÖŃKőŕňÖ“ěś®'^v›_G[c4%5]UG+6>”ü;ľ×Sy¤BKž)—1%-]ă&ßbnJh(yÍWŰäÔt=˝éť€ő>˝é­\đłű9Ň™ĐARJ¦ěÎň+ĐĚŔ„ËĐ*ý‚ÎĹ3l>7Ę2Z.1`ćo¨QţĚnă´ŔQ’ołˆ´;˛ĚyÇíŻołŚłŐÍl¨:Zˇň]ŰŢ´;˛FR뉲ŚÖĂ!ĆNóĄýŻŘń}KP=ĂÍv>ݸbSC˝öűfR‡şv´5Ć ¤;ŰpŔĐÓćşş:}ńĹť>oüřńš0aÂU[‡ÇăQKK‹bbbät:#>ţŔ!×ŮÓó^«uu„ËĐÚwă¶ołkl˘]i·g)íŽě MíŞÂŚ˝Â°Ć5g/\ňšŃ–ŕÍîÂm:Ş›yÜä[”z{–Ž~XŞm/-×Ţâ­ş{áR3ěmż!ž±ž´ŰłCn´g<î˙ÚýCäPőőÚÚĽ-f˛±†#ě–$ŮSj{ĺőLUŐŃŠ°>€ˇ­Żf·Ű­µk×vůů999Zľ|ą233{uyyy:pŕ€fÎś©˛˛˛Ź7ÖóüóĎkÍš5˝vŢkµÎˇŽpú€†>ţ«BĄÝ‘ѱĆ8‰(ËčÁhŘÖă»?ÜőšÚu÷Jţ!ň-A×oßÍ,IOĽěVɶőÚSĽE'ŞŹkŰK˵ăµőzŕ‰‚®kt?§Ý‘ŐaŤÚo$îľpŻÍ¨‡Ý15辎jeôÇT>°BH#2JJJTRR˘ÂÂBFx ×.Ŕ5ÖŮĽĺ®îX#üőEý»wĂuę†:.äf~aş™ 9KVjöÂĄÚSĽU{Š·¨©ˇ^ľX<ń‚9‹Ů–s¨Î`˙‘ţë řv¶ąa¨ůĐm­_uX ˙×>ç-čşţ0oذˇĂQn·[EEE’¤Ĺ‹Ëétöʨ‰«ÉX_YçĚ™3%‰‘í.ş,Ĺ•kŢ>ć.ęŇ16§S ΔŽé•ođ˛ł5*áFť;őĄjvďz<ÎáPňüůŠ›ěPśĂˇSŹ>{Ź$ux\bILÔ¤{fK’NyŽ©ŃăáC „˙ăŽÂÍÎŽʶy[t´ĽTRč‘ářgś7Ô8 ßą‚CáĘŁjkýJSśÓmŤQ´5Ć ™_/xNwo׎Âßár¸YÎí×Ű>Ř 5¤ŁÚX›ďŕ0Üx}í•ďÚ®6oKŘîpřůŁŹf§ÓŮḋĚĚL9ťN­X±B’”źź/·ŰÝŻkšźźßŻÖÓŁ9Łë( «nsąĚ˙M[¶¬KÇÜčtšÇô¶‘V«R\ą!Ď}SÖ=şÍĺŇMY÷=çpč® 
ż•#;Kq‡$)ÁéÔŻ·ĂăúŠÍéÔŹţ°5č~KB‚YŰűy×.żg‰‰šťżA6Ţ/`Č Ő! c|E¨ă÷oő›·<B’ů¸?#LŤ·ŮÍQˇĆIHˇ;z×-›§ŤO»‚BěhkŚ~¶ŔĽ®ćV†yŞFˇĆ_tĄ›9Ôzýr㹡Âĺ6o‹vĽ¶>č5@GóôÝIWľĎ;tčPż pýî;vđ†ˇW.şĺćóű< »wË–n…Ö·-vi¤Ő*IŞ--Ő1w‘Žą‹t¶¶¶˙}šť­»ó7!8®8‡Csßx] ËÔ{ăŚŃ†˝—ÇPHľ6Two”e´Ţx幀ăNT׎ÂßH’~¦Ŕďţă$Ât3#)öľü ŃP3˘CmşFSÎÓ•nfC¸Í Ón÷čwoWů®íĎ_÷řü+Ż‘ÍüDňý{?cccÍŃÍÍÍćýŹGłfÍҬYłäéŕŻ!óňň4kÖ,ĺĺĺuxť’’Íš5KÆ Ó1c4wî\s$G$:ş^ssł Ěë 6L©©©]«¬¬LsçÎŐĉ5lŘ0Íš5K+V¬P]]]Čç5j˙~ë4î/**ŇÜąs5fĚóĽ5Ą¨¨HłfÍ XŹńZÜnw—jß‹č¶é«Vi×#Źč‚×Ű'×·$ÚÂ>vxă&Ť°Xt±µ5豋$élM­*^^×ĺăúBtBBŘÇÎÖÖjožďĎÚZOťâŮĎź;:ăĐ™q“§Şęh…Ţxĺ9ŐW¬hëhů Tm8ˇś‡VęŤWž `=ľ?Ń~ř™müůb55ÖkJjşšęÍpuÉ3q¨qFŕÜľ›9硕Z÷ř<ů`·Ö=>Ďo´ĆW*ßí;˙Ë_4źßYŔzăľóź;ŮÜĐ8OűyĚSŇŇu÷ÂĄÚ[ĽUŰ^Z®˝Ĺ[ÍçŰSmŤQ›·…p@Äúó&_|ńEĐ}ÍÍÍćȇŽPŹÇŁčŇĄKaźłxńâ€đµąąŮÜHĐívëÝwßUlll—Öîzuuuš;wnPîńxäńxTRR˘üü|íßż?ěµV¬X4v٬¬Leeer»ÝÚżЬgŁFF@ß~ť3gÎÔ¬Ył‚Ćg´?oű5577kîÜąaŹËĎĎ×Ě™3UVVÖaíű ťË€nł$Útknnż\Ű™š5z<:SSÓÁsŞ»u\qÁëUŁÇŁFŹG­ | `€0ÂP©űc1rZ©äÔtµy[´§x‹J¶­WĽ-I/¸÷™ĎńEŤîÝäÔtĄÝ‘­%Ďč‹OŹ«dŰz•ďÚ®x›]Ź˙ŞP÷. ¸N¨¸2L—đ”´t­u˙Qɩ骮sŢÍyhĄ~¶ ähčŞţŘÁěńxĚ®Üöio8pŕ€Ün·bb´Z IDATbTXX¨ŁGŹj˙ţýš3gޤ+ťÂ=µxńby<ĹÄÄhÆ Úżе<OŘ™ÍP~~ľĆŹŻÂÂBíßż?ŕX#ěŤTAAĘĘĘ”’’b®«°°P)))ćšBuűËsćĚŃ»ďľk;~üxy<ôŰĎ:ťË€ť­©Ő –Q˛ŘlşyÁ|ť,/ď•ÍäâÝ0j”$éÜ—_^őŔ´µ±gÝľ —żIřúÜą.‡Ń#­VĹŢt“ůu$Çö„%1QŁnĽ±G×4^Ż$ť:v¬×Öć˙ľKRógźuąŢżž‘×ßëâ_“kőpíLIKWáÁĆť#Ú٧7˝c†´ă&O5CŘŮ‹17Í3ŚM´\3ăŢEJ»=K'.w=‡ f7—VÝ—łdĄr–¬ ůüq“oŃӛޑt% ›ĽiaűőtµFkÝ ş/Ôëő÷ŕň:¦Ű˸wQP¨ntrĚĐý©ą®®N‹/6żľZk0ÂP˙îÜĚĚLą\.™Ý¸m>ŘŮë0‚Řüüü ×‘™™©ĚĚL8p@EEEZłfMČ󤤤¨¬¬,hť999Ú±c‡ęęęTWW§ &tymÍÍÍĘÍÍ ú%BNNŽśN§ľřâ‹ Y×%%%ćë wlff¦ŽőâĎ_˝Ťp±‹çZuxÓ&ÝťżARĎÇc¤¸ruÓ=YAc.ZőQQ‘jKK,ČôR\ąJqů:¨˙=sVŔsN;¦=Ë}żţźeű®kwĚ]¤ŹÜîÇůł$&ęÖźţTöŚćÜfĂ'oľĄ?˙Űż…¬Ă¤¬,MY0?äěä ^ŻŞŢ~[ÇÜWfŮśNłľcýĆZýźcÜęş·ĺ憬mĺŰoé“·Ţ:Ć˙Ľ˙ž9K7/Ż[ss^ݱćĘ·ßéÖűŢQ%é”ÇŁĂ›6‡ UmN§nsĺÍ/®Ů]Ş#ŻľŞ™żxQ ))aëbs:őýeŹ˝‘ÖeĘüµ˝ŕőęłŇ÷>ˇŢKăëöźłW®’çĎŞIkCŁ>{ż´Űő0x… n»*Ú˛k¸·\Ís÷Tĺ‘ í(\ŻhKŚ˙UaĐă/Źń066€îşVsQQ‘8tssł<OŔČ…”””«.»ÝîŁ(ňóóURR˘––ĺçç÷(\îĚš5käńx‚ĆZ´N¨ućĺĺ™p¤á˛ń:Ű‹ŤŤ•ËĺŇÚµkŐÜÜp^#L?~|ČÎöŘŘX•””hâĉýö3N¸ č–FŹGźĽő¶n^0ߏqxÓ¦Î1ŇjŐĚ_O;¦Ł,ă$K˘MéOŻŇŤ)):´n]źżć8‡Cwmř­üµ66ęÜ©S3i’FX,şůţJHuęŹ+ž §ŻZ%Gv–ůµqśŹ´Zu›ËĄ§S{.ĎPî íŻ{¶¦Vϵ*!%E–D›¦-[&{F†¬~.l`Ůţţď]w×ÜľŽĆű>*!A›/¨Mp:u׆ßęŹ+ž ŮŮšľęźÍŻ/¶¶ębk«,6›ŮYŠ›Üńć‡Ó{L7ßż čý3i’Y—§S‡Öý:l]¦-[¦›ĚY—›ď_ ¸ÉŽëŇľÖF´ń~]ŤĎ UŃÖŃfgő‰ęăťŰ;^[ŻĘ#ОŚÖě…K)€»sWGnĚś9S%%%WĺuĆÄÄ„ Ťccc•““6ďŞĚĚLĹÄĨĄĄE+V¬Đ_|ˇÜÜÜ€Řč^îHNNNŘuşd·Żm¸ĎíĎk¬×˛;ú,L0A)))ý¶{™pĐm.*’=cF·Çc¤=ú¨,×––ęO›_5ĂýéUaë8ŇjŐ­ąąşyŻ{wĘ‚ů›.Z•öčĎ$ůBĺĎ®6Ż=Ňj5;–Ă~cźťmËí_—$łŢöŚ µ6ž ű “›ĚשcÇôQˇŰĽ~śĂˇô§źŞ‹ń^úw0ďÍ[P3{F†ů~ŢĽYźĽůV@Müß/Gv¶jvďć čq“oQrjşŞŽVčy×]f¸lĚoޞŚÖĂĎ0@ŻąÚsJJJ‡ĺ#:ęćí©ÎÎmŞmŘyyyfđš5k´fÍM0Á!aĚNîék4\Ž´6ţvÖ!=a„~.łˇ Ű.x˝:äüM_µ*äPÚ‡®/Ż čmmhPŮ3ϚݛľéÄ>{­Éóç›ă<»: •|#1ڱöWF=Ląd^lmŐˇ—×Í‘nôxtĚ/ŕNHíů7{–ÄDł«öÔ±c*{ćŮ€ë^đz}ˇîÁľotłłd óŤŕŮšZíYžŔ6z<ˇëŤ|çpŁ(ţ´ůŐ :^đzuxÓ&ť­©őŐ#%đÜ·ĺţÔ ¦˙×ŇGÚ ^Żö,Ďëpîń­?ý©ďóŐبĎ®ęŠţČí6ßÇŽ>s­ŤŤÚłw«ÍۢžÝ'×/ßµÝ÷˙–+?yË0'8ťšťżA‡7m6˙ÔßćtęűË3;koŢlÎă5s›§Ěź§łŐŐaµŞö*…¤ź•ľŻ)óçËbłiúŞ–Ĺ– ĎŢߣ ^Żaů'o˝­Ö†µ64¬ńKŹ'`β}Ć Ý¶Ř6H•¤6ż1ó/ę“7ßŇ×çÎu8á‚׫Û7kÚcľúݵá·úÓ¦ÍćµăM[ö.SóÉ[o_ł ţŁ)¦ŻZĄ#Żľj©–ÄDM™7O7ßż ěńţż¸¶l™śNŐ—ÔEŻW N§nĘş§Ă ďoÚ¬Ě_Ľ¨‘V«ćĽţúÓćWuňŕAó}ś2žŮY’|ÝŐW«[~Ę‚ůşté’FX­Ş//Wĺ›oÉ>c†FZ­Aď—±.ăóuęر ˙ +šęewL•$Ť›|Ë5żľŃ-e­±‰ö€Ç’SÓ5%•pŔŕp­:–'LĐi7q¤bcc•““rÓ˝p× –†;G(áÂŕ®ć…ÉÝ9_GëéÉ:Ť˝=·Ű­––Íś9Óçđ¦MaĄIYYJp:ÆŃÇÜEúäÍ·‚îŻ//פ¬,Ť´Z•ůŇ/$ůF\Ť ô‚׫Ď®ÖĚ_Ľ(‹Í¦iË–iÚ˛eAĎ;uěţě7‡ěO›_ŐôU˙¬‘V«îÎß  ^ŻškkÍ`×x}¶T§RRdKuę#÷•ó„ѲgdčÔ±cÚł<ŻĂő~ňć[•`ÓÍ ć+ÎáĐÝůB>ݶ´T‡7mşfź‹Ş·ßÖ¤¬{d±ŮäČÎ’#;Kgjj4b”Ĺ ŮĎÖÔŞňí·5ýrguűůÂ{W¬ĐĚ_Ľ¨„”ł&ţ­űµyl¨ĎŚńřH«5ě cFvojôxĚ_6ë>SSŁúňr5z<Ş--Ő¤¬¬߯ł5µ:đ,6 {Ć&Úőô¦wúěúĆ(Śö!ňË_äÍ0h ¤Qč{%%%Ú±c‡&L Ď?˙<äsüÇ{.´S—7čę|ŢŹÜnĹMvt8¶âĺuú¬ô}ݶإ„ËjKKőYéű«ż?m~U’/ta±ŮůŻ1ÔZŤűÚBŚčđ¸šíZú’çĎ7R¶ź#\ł{·.x˝JqąĚ1! 
D^L,ÇŢÝYPĘĹŽŽnő×ĂK*¨Q´Ú,O]е«WáPN‹ nÎ\Y‰ŁůůX»zľ]˛Zm÷Ä]+j‹yŠaĐ [wFc˝^ʵ«Waá`ďžÝ,65őxÖh°rĹ/śťśĽRR A–§.e- T)…?oCćcă†?F+öěÚÜÜ\—x=á@ Úž©AfFĆ•;d ř„Ş!ěÄđ gHnÂVTX̰ť„íT>`ÔŁ¤¸?Ą¦˘Wď>2l×fSá#Ć<Ĺ0ĚÔoh•‘ĹEEX–ş´V1ϧC|Â"!) đ„@¦ŕ‡Ř T—ˇÚl‚eS*Ă "ň‘ľ«ý’bŞ;#s…ĹEE Xp Ćą<˘ ’Ŕ7.ÂČ> ĆM].ůtŚz%Áz"U{7–ĘKߨë0jôťHĄ,:ÜĢCNv6Ö­Y}Ąź:ÇÂ7n nJ c """â%Ô¸b 0â»c±ÁĂ;ÇA|×c†ŽćçcÓĆ ^™…źź€+E‡›uy…V[pĄŘ ’@”8˘ţcXl ""˘Z”Ę@ O€¶ÄŔ0ę®FרîĚČ…p„ŐiďžÝöbO‡řöż±”Áx X ńČaŮą ¶98”“…B‰řÄÄ›ňüą˘ÖťŃżÂGTó*, X˛dÉMéP\T„żţj/6‡N +‘÷2[lĐ–đ䓵żV«Ĺ˛eËç.zôŇ …ń‰‰żčW~PęˇV‡Co´ŕ»[ äŞôz=6m¨ů–[Âb·ü2č9 –ň"T—c{¦±=zŘ—Ńl)Ż)GˇĐ·ŤËd7­č°iăűś ˘Ä»Yl ""·–“ťŤ¬¬,Ć"§ ü†Ú0Y¬Đ–’’ҨýŤF#rssˇ”‰!ň‚6"WĂK*č:™Űě·E'°Řŕ%b)D'\9ľ 3 »R±ˇWŻ^ö˘Ă´iÓ´ěĺZm}â)aL'_%""·§×éPVZĄLĚ0G8е(ôú+—RtŽĺuä^F S@“ë! ŽćçCŻ×·ř(ŹŹG|||«˝ďŕŕ`¨T*űĎ7c¤Ăž]»kn$Fqn""""ň<,8P-Ú‚‚+ޏ Ä »ö€őp4?˝z·üɰJĄBtt´KĺĐ’E‡J“ ÇŽć×äهŁČ#ń’ Ş%?/Żć†LÁŃ ^J SŘW­ČËÍőę,ZęňŠüü|űmaX$?tDN…^}úqvrňJ:٦j1Ä'0 "/–]P†1)ăí_žµ6¨˝^WóÁčÁ0ĽO‡š“ŕââbŻĎ˘%ŠzťÎ~›E9Ott4z÷˝ťňÎĂ-0A|ÓV™ň6ÚĘ«ý1}Öl†á ŁŽ‘·aŇ”© Ł•´ďбÖĺÂ,8Ë(ąt‚)q7Áľ‚‚·KHH°Ď1Qá#Fެ—SÚ˝<’„j}PČDČőňQ–užĐk đţ{ď"ˇ;ż¬¨OfFÖĄýŹąčúa%C¸ŠFŁAff&ŔŻÚ‚ÉĆ˝ÍţŔWÂp‰®"—ú".<óćÍcD€"şçď¸BŁŃ`É’%jŠ 3u8uµ """§ýýąęÁbą“â’ÖýĄP*áë+ş©ĎŤ @s¸zŁ…"Ăe1‰Čm´Ôőś~~~P«Őµîc±ÜMQÉyś(8ŤÎáZĺůcăâ•s ›¶d˛3 Dä&>űě3ěßżżĹÚź4i’’’°Ř@ä®JKKqöĚ)(d"~ÓI^ë§kŃ6@€Ţ}ú"6.î¦>÷‰‚ÓXůűVvQ+RČD8{ćJK;¸ÄJ,8‘[hÉběÝ»III,6ą1ŤF•éé ‚ćp!Ż#ôŔráJLB¬[łGóó V‡#$ôúűĂ®Ůwµâ˘"T6bĄŞ«Ű(*-Ăg‹R=ú$N*0c{¦âřa«'Ł çĎ!';ű¦»čЏđ ¬\ń 6 RRRXp "jŠřĘă7sZ{Ëä˝Qčd?Ya±Ü•ŐVŤýÇĎ#.<r©/Žćçăh~~ťűNź5»Ţv6nř§ |ľéłfăDÁidíËÁĘő[a0Vxl¶J™R‘™‘Á‚ŚôĄg‘“}bÁÜ“ĘzŃUÎűćŇĎf˛Ř@DDQtČ.(:XĄL ą´îîßű·ú ±ę@(ĺâźËQDD DD€ŠŠšoeXl ""wgµUăDŃEűĎ YÓVŽ8QtB!—Řt5R‰ĂÂpčČ1†Q…R‰Ŕ6*śÔža.‚"˘KXl ""OÔÔIT •U­öZÍ´%<ůŕäFíŻŐj±lŮ2ĆâÄą‹­úÚ[ZHh(&M™Ę‘%ÄĆĹ"?¬z÷ †á"|‹ DDD®Ŕd±B[bhôdwFŁąąąPĘÄ•Aä‚8ÂĽZJE6ŇL6îe±Č‰Xp "Ż]U„č ‘“ń’ """""""r:Č#DEEˇWź~Đ–y%m‰˝úôCTT”KĽČ#DGGŁwßŰYp "§+Ö™p±Ú÷LžĚ0dܱ†Kb­H[b@ďľ·#::Ú%^ DDDDDD,VTAµ:ĽőN$µx˙˝w‘Đ=Ôe3’ČüÚzŻ/3#ëŇţç˛y#Č%(d"$tĹăŹ?Î0< DDDDDDDät,8‘[R«Ő1c˛ Ę`0U1"Ă‚ą%™L†ččhčŤXmŐ „ZÝĎ‹>ŔCSÇŁ]Ša€ň"ć ‡ŘGĐŞŻăď˝:ÔůÚĆF¨đrB'ĚÚ /Üޏ`9;Ťx¬9ńXăqFDDD®"Şk'ĚxâŻX·üK,úđuL¸k8üoŢżK2Ξ9…ŇŇR—ČĂ— rwń }00L‰ ĺ­ö:BüÄ×Ý÷@l[śÔ›đáîBĚ6¨ü|11:ţb!¶źÖłóÇšŽ5gtYZZŇÓÓśť<»  zŁĄÎmę`9ÔŤ(X9jC!!.<¨Á6´%†z—đtF5S»Î`F޶ĽYm€ćpQ˝ŰbŐPĘĹÍjĂ}ăný댾i¨ gôÍÍěßÖ˘‰ ±=Sń ü…[OFÎźCNv6băâĽ>@€ľ=cŃ·g,ţőücŘş}7Ň߂͙Y°XZîňź¸đ ¬\ń 6 RRRXp j0¬SľÍ9‹iqí°E«CUőőĂén •clD0%ľ(ĐWbů‘"Ěě§Ć¬ŤGqyďţí0,<!2ĘMUX{â|”MÚrü/·°űěä”íŻéśŃ‚af5O„Č+ʵćgę<ÖxśŃe2™¬Qű˝đôĂhß!¬Îm»łv`ďîťÍjăĚéB¬\ńKmŚ5}úÝŢbmŔW_|Ü`Ńť1sćÄfµ_đZ˝ŰŇűgĎśnVÎčwë_gôMCm8Łonf˙¶ĄL )ĚČĚČ`ÁÁAFúŇłČÉ>čŐ‡g缅QĂ1$ˇä2?€X,BŇ H4.ńűf Ňߌ]űrP]íŮ—±ŕ@níöö ś5q´Ü„S͸˝§tµöéꏻş©°řŕYÍ –ăáíáë#°â‚ĺHéŚďťĂ±ň „+¤x ®-ĘLUČ+«€µđóőÁ]ÝTX’}Ą Q!řxĎ)ĚŘx%EŕŮ?ňk=÷ţâëżŮ¨´ÚŘqä•ÇZsŹ3uk<Îč˛řřx€Ńht¸_€~P©ęľ¶V!ˇcű†ż5vÔFix{l–ŰŠŠBttt‹µg““lCĄRˇ_ݏfµŔa– }ن÷:jĂ}ăný댾i¨ gôMKôoTT©‘[Ú±3vB,a`˙Ţ94w dßŕ/Ă„»†cÂ]Ăq¶¸«×oAúď[{ô D®D`Dç üđgÍđĽő'ÎăţvČ<­ĂŐs Ä˙r‹ˇ˝P Řsî"üEBÜbßgčĄ}źŻůGj~yVä•âŽ0%ňĘ*Ô\'ľňh)Îjľiݤ-Ç`u`“_÷ÎAŘÁo]É Ź5gÔŇd2’’’šŐFtt´ĂřĆP©TÍĆęŚ6¸L Í˙FŘ}Ăţm™ľq•ţ%r%fł¶íŔ†m;ě‡o—§aÄű„’íBTxhęx<4u<ňŹ`ĺď[đŐ÷?{Tś4’ÜVď¶0XlČżt˘r´Ü˝ą }ÚÔÚ/ÄO„:S­űö_¬ősX€GÎ×ţF,·ĚN i­ű ô•öŰF‹ ~ľM;„îSB­bőńóě@ňşcŤÇ9[ii)ŇÓÓˇ–C*zô{U«Ă1}Öl‡sax»řÄDŚL™ŕ˛˝óÉ×qĎßp˙ß_Ä·ËÓp¶¨Äľ-˘K8ž{ě~ŹëŽp ·5˛Kë Ŕ IDATV«=|o݉óڬłě÷ÉEBŞjŻľ`¶ÖúY"ôÁŰ»^÷U×,ŻdiĆrKĂ ÁűN_×.‘7k<ÎČŮJJJ––u°:Ł&‹•ˇËŰźsűsŽŕťOľFňÁxî±űŃ.4Ř#ß+ ä–n ‘Ł˝\ŚGz´Żsűm!rűuÝ[5ÄB*­WN>üŻ©€›ŞlřżŚ0µŔuß~ľ>¸':§/šńMÎ9vyí±Ć㌨féĚ‘C1rp<şt óč÷Ę‚ąĄ‘ťŰŕ‡?‹ęśľOŰŚěŇĆ~Tl4ŁłRŠ#ç+ěűônW{(řI˝ ]ĄřłÔčÔ×) đTŻŽXsü<˛,[Fä ÇŹ3"""ňfĎ>z?F‰G§°×m;W\Š5¶yÜ{ćävş·‘Á_,ÄÎ3uO·çÜH…>čަfy´ŚSzŚŤF;ąbˇ˝Býq{{E­ÇlŇ–ăŢčDúAä#€B,Dr7ż­CŁ_WyeÔ|öűRşcka9O‚Çš“ŽłşŽ5gDDDäj|}}1(ľ/Ţó¬ýľGďźX«Ř żp?§˙އź #î}ď}¶ŘórŕGÜͨ.m°ţdę»<»Ŕď'Ë0ŞK>oĽ´t_5žęŐ~ľ>8¦«Ŕ˘g0g@'űcţ,5"íh)îí‚P™f« GαüHă'śůáP»­„>Ŕś-ÇÔJ Ä_bÚ^·˙[Ű ě3ńyñćŚă¬®cŤÇąŠ!‰ý0rp†$öG€żěşí¦J36i˛°ę÷-ŘşcŞŞŞśúü:QÝ:×»´/ D řpwaűl?­· 
ó´§®|K*Ἡöú×{Ď]ÄŢsëmóŮ?ňŢwřĽ/m;^kűsu<†Č›Źµćguk<ΨĄëLRăÁ)c†ŚzÝv+†čŐ9|üćśëZ±}×~¬Zżë·î@E…©Ĺž?G[Ž3& ď,8Ý ă"‚‘WfÄÖBĚVÚĘŸ': ĘŹ5""˘™,VTAµ:ĽőN苊°iăĆ"Ç˙¶š,VHdţ mµ×“ťŤ¬¬,—ȨşşűsŽ`ĺú-X»!e:˝W;,8Ç[ś}wG†ŕßńťŕë#Ŕ‘óF¬·ĆŕIcńěżŢBÎaĎžüÚ‡""""""˘ć R*đĹ{/_Wl¸Vhp|ńÎËR*<:Š„đ……Z-ĂpQeĹEy÷D‰ß7AJl6–ý¶<őOÄŹů n:îĽS˙ľ˙y%l6•6eśSź?ˇ{(.řiii.‘ DDDDDD„(ĄđT`yęR†á Ł’ÂŁŘ¸áŻÎap|_Ŕ{ź-Ćkó`_öa\4ałŮ`0V űpŢţč+ĽżŕŰZű{*ś }»PŔ˙V®w¸ßĺí/íď©Xp """"""r‚j›  îWUuiUŔŁó`ÁÜ’ŃhDnn.2„>ž}â¦V‡cú¬ŮĐ.bÇ×#>1#S&´jF…gjž{Â$‡űÝ5b ¨¤ÔŁű„Ëb‘[Ňjµ?>âÂ]PĆĺ1©ŐmĘ؉ȮáţÄ_Ń®mŇ×mĆń‚B+LJ%čÖن&âÁÉ5“Effí÷č1±ő^^Źí™¨ĺ.›‘BŐ±qq­ö´ÚË=ě˛y#^RAD׳™ą%ŁŃÂÂÂ÷S©Tő7uF››Ű`~~~P«ŐÍj˘˘˘ü\‹ŠŠŠfµQZZŠŇ҆׊ L&k±6nf˙:ŁojĂ}ănýŰŘľqô\ŢJ§+GfFÔÁrhK ¤…Zä9ä%öď…1Ăď@\÷H´ †D,†©˛§ĎăŕźąH[» »öç°ŕ@DŢÇVvŽ!´ ĐPś*,DµĄ’a9™V«ĹüůóÜ/99)))-ÖĚ›7ŻÁ6"##1kÖ¬fµ ,¨w[jj*ňňňšŐ†FŁAzzzmĚ1ŃŃŃ-ÖĆÍě_gôMCm8ŁoÜ­Ű7~~~řŕřKŤÜŽD,Ćüął1(ľďuŰä2?Dv Gd×pL¸k8V®ß‚˝ů!¬V ä¸pAĎ ZT"­ąˇ+fDNv8ďxŁöű|q*ćĽ÷uťŰ2âšŐ$tm°Ť]űrĐcđřfµŔa±ę@(ĺâfµˇ–7jňĂĎż˝ŃŇbm8Łoۆ3ú¦ˇ6śŃ7îÖżŤí›ĆŚÚ€ŕŕ`$''ăóĹ©0[l jmĎ=vťĹ†şÜ5|NśÂK–±ŕ@ŢAˇ¬)8XOĺÁ7.‘x)[Q cXĂh‘ăLiż]mÔC S0"'iŁ FvAĂËŚ9:11ŞšÝ€FµaµV7»Ť†ś(şˇPЬ6Šu&čŚ _ng0UµhÎč›Ć¶áŚľi¨ gôŤ»őoC}s˘č":‡ú7úµ«T*¤¤¤8,ţÝLc’îč/đĺ7˱uÇś>[SĄR‰íۆŕŽ}đŘ÷@ŕŹ»ďLbÁĽGdTNşbžy©jł Ő%5×OŞŐá ¤¨ĂŻäj;•ad†Bä$‰¤Ţo`Ëj«nv\¦ CeUłŰ0Y¬0Y¬­Ţ†3úĆ•ú×}ăiý{Ńd‘;ó÷Ż™{äů˝Ť¬}ٵ¶U*qěd!Žť,ÄźyǰđýąQyt\Ą‚jéyĺ—ţ± Ä YswŰoGDF2 P(r)ď] „ČIŇŇҰpÁ'Ťľ ¨±¤"!|aEˇVË0dTYqĹEE^ťCá隹вŹä;Üď࡚‰gOźsî%¶±áXąâh4—ČşîDčň0zkŢnT›M Ĺ‹T›M°ćŐ:†…!$”˙ho)˝űÔ\ŰWmĐĂz"›ą°Ąţ‚ ,O]Ę0dTRx7üáŐ9,O[’ĐĎá~ń}{–ý¶Ć©ĎŻ”‰qöĚ©F­s3°ŕ@×:,©ć†Ą–Ś_©Ú·¸´rBk®3í băâPsÉRŐŢ ¨./b(DDDDnîűźŇńůâTĽ2ëILüŻŤî™ź~R ˘şuĆ“NĆ›˙zß˙”ŽźŇÖÁĎOZë?OÂ9č:!ˇˇčŮ»7öíŮęb-,;WAÔ ńôbCvl'jÖމŤĺü 7Á¨1wâ§ÔTŔR óŽUťXĘ`ÜÔ¦_!P©€Pč‡ďʇď«…żÜ“ŚżÜ“|ÝýŽV•q7á@u:,É~ŤąíD,;W1fŮą ÖC5×y‡„`ČĐa ĺ&P«Ă1rôť5?čŠa޸”#Č«Y­ŐĐĚlä˛!8$“&ODĘoŮoÚƸ8ÄÄĆÖ*:\=q',[¶ ˙řÇ?ě? Ľţúë7n&Nśďż˙ŐŐŐ ŠÜžˇ˛ 9ÚrĚš5«QűkµZĚ›7qáAK={đ¶D*…Zî”R<•Bˇ@›ŕfäBxI9üĄ6iň¬Y˝ÇŽćşbX6ĄB˘†o—„Şąl¦Ş6ęQ]¤EŐń¨.ľ2Órǰ0Ś»{<‹ ­`Ôťc¦Çş5«K%Şöm€5o|:DBŘ%‚@NŢéÍŇŇŇđß˙ţŇ«ŽÍůóçcóćÍöź-ZĄR‰äädFDDÔŠ“@eĄf‹…_°ŕ@Ť):Ś?{÷ěFfF*++kću¸ęD" OܡĐP^dź˛VK$čŐ»'‰le±qqP(ČĚČŔ©ÂšŐ+ňvŰW (C8Ç«gf sîňV+W®Dßľ}ńôÓOŠ‹‹±eË´k×ożý6 ľýö[¬\ą’""˘V¦żp‘!°ŕ@MŐ«wÄÄÄbĎîÝČÉÎĆ… ú+-•µľ)'÷ @l\z÷éĂQ .B­‡zJ8r˛ł‘ź—W3˛čjşb°Nî}Nť:…7Ţx*•  ŃhP]]Ť)S¦ ěŇ2Ć=ô¦NťĘ°\Ŕ¸”d¤¤ŚĹädtčĐ DŤ!‘JźřÄDčőzh  ×錛Q(•P‡‡Cˇŕĺ0®*6.±qq¨4™PT\„ÂôÜŤÔOŠM68Ż˝«Š‚™™™€Ä«F% Řl6ŻĎ]ĄRˇm»Č=z‚B"j5ůyy÷Ţ»?ď=ôéŰ)cÇbä¨Ń d8Ä‚QŁNZ5ߎQËą<9—(uOÎ*8´mŰK—.ĹÔ©S‘ťťŤÝ»w#&&AAAjF@|úé§čرŁ×gž_i™ţ2?€DäT•+,Ő>č¬nxÄBĆöČÔh‘± šmŰđĘK/áőąs1hđ`$§ŚĹĐaĂ ‘H<2#±TŽP^jÝš4‡‹°đýąčß» DDDäXRR.\üŃ~ßĉí·źyćčőz<÷Üs ‹¨…éLn'ä) _ľ;ÇŚÁťcĆŽÍGňťwbWVţXżrą#FŽBĘŘÜ> Bˇ°QŻAŻ×ăPN6ÔÁrhK .™Ń`u†Kjµ× ŐŕXîáVÍčŕć_šÝFŹÁă=ćŘaÁČ…ÝsĎ=8}ú46mÚˇPI“&ađŕÁöí={öDtt4RRRą=…L„¸đ <ţřăX°`GĽ§nÝ"[24ضu+ŇÓҰvÍjüúË˙‚1wÝ…ääô¸őV‡íčtĺČĚČpŮ‚+(,Đ"˙Č!fÄ‚5†H$ÂĚ™31sćĚ:·żü2/ "rb±Ă’’0,) FŁ7üuk×âçĺËńÍâĹř37Ź!y‘Š >>JÄý>Yp """"jŔŐä›2ÜůFGŤŚääd|ľ8f‹űLž+ řŔÇLJťčaç~~Rt kŹqهař xmţlÉÜĺŃy°ŕ@DDäâL&V¬XĚĚLś;weeeX˝z5࣏>BLL †Π¨Y'?CűcHb?Ü Urą fłçŠJ“{¶îŔ[wpEr)*• )))óŢ×.˙Z­V+25¤Ą­Ŕú߇Ń`@@@FŽ…d^ç**L8św‡ó˘ đ >|ăE<6ă˙µ/›"""şůt:fĚ“'OÖą]ŁŃ`ĹŠđ÷÷Ç€5Ů-‘]ńź—¦ŁK§°ë˙ˇč'D—NačŇ) É##˙xfż:ůÇ Q#]ţý=x`"JKK!‘H0xČ$§¤`đˇ‹Ĺ É ýoĺzĚyţQ<1m DDDÔ:-Z„łgĎbÚ´i0`ÂÂÂjMůꫯâí·ßĆňĺË˝ľŕ––†ôôt$t…ćp?<Ťp[l4~0’K'<¶íD꯫‘sä( #‚ŰbÄ<|ßx· BD—p,úđuLy|NťaĆŤÁË(ĽĎ… °}{&4۶aŰÖm(,ÔB(˘ű-· 9%ĂGŚ„żżżWg4.%))c1&9:tđĘ ÂĂÚb˘»9µÝŘđ@¬\ń ŞLŔ‚ŐOŁŃŕ…^Ŕ AęÜŤ_|łgĎfXÔ$b±ď˝:Ű^l˙Ĺ7XôcíĺÜΗâŰĺiX·IĹ˝Ž°í¨ ŔśçĂS/Ľđ—Cłň;ŔźyÇ0éo3Ńżw<ń×I¸%Ş+öeĆű ľĹ‘üăuľ–6J<8eĹ÷E‡¶!€@€˘’Rh˛öaŃŹżâĚąâz߇şc;<4?/óŢ{ó罇>}ű"eěXŚ5ť‘@ €RáŹ>·Ć`Öß´ßçLJ™gĎśBii©KĽg\N§Cź>}îÓĄKÍf†EM2vÔP´ ©9Ú±çŔuņ«ť+.ĹÜy_`ÎóŹa“& ›¶í´o3UVÚoűËd’Řďżö|…Bűý‰ý{ˇg\wLzt& ĎÔj;®{$>ç%*jÝß)¬:…uŔÝw&áůż MÖľëOěúőćoüłÖ,ď"‘/zĆuGϸî9$Oţă5űÉߍ>ćj•f 'ôĹŻżŘč÷HŢĄK×®¸+9ÉÉ)čĆ@ę±}25ddlfŰ6ĽňŇKx}î\ <É)c1tŘ0H$·|oWă@ÎŹîkN‹JDDä‚‚‚°gχűdee!44”aQ“ Ičgż˝ě·µ îźąk?Rî 
ó>[ŚÝŮď·X®ś+•ř×óŹá폾Bß“1ńáé8}¶ft‚\懿ýeb­6üĺř𫨠€ÍfĂ{ź-Fbň¸}ôTĚť÷9ŞŞŞŕ'•ŕÝ˙›…ŕ6AµŹ ĄďĽ<R‰Ć žţçč3bîžö,Ę čs+˝błs-«ŐŠOĽţ÷xß~¸ĽÜŠô•xü‰'Ylp 00wŽ×ßx6oAúęŐ°X,Ř••…éĎ=‹Ä·ăź/ĽMĆ6X­VŹÍÁ`¬Ŕüß°ŕ@DDD­#!!o˝őľţúkäççĂ`0¨Yą˘°°?üđŢxăŤz/ą ŞO÷Č.öŰÎú†Má/ÇşM¤ţş•f3rŹžŔü/–Ř·ß]k˙{ÇŽDhpŔw?ĄcIęoĐ_¸c… ËW¬ĂâÔßěíN7ęšÇŽ‚RQ3*âËoÂfÍ.Í=ˇĹżßţKĘuĐŻgłs-ąĚĎń{ŚëÎW3X­ŐĐĚŚŚd× Ĺ=“'#» ĚăŢ[·n€-|úů4xÖ®YŤGzCÝ·Ţ|h°ťŘ=Đ7ţ—Ψ˘Â„ă' ńËĘőôčLÎ;îŃź[^RADDäÂzč!8p?üđ~řáűýÉÉÉöŰQQQ¸ďľű5IRaż]R¦sZ»ż®ţŁÖĎ»®š}˝]hp­mCűŰoŻZżĺş¶ÖmÔŘGE$ôë‰Ď-µo»#ţĘĄF›3łj=îřÉBô~ďuíÝČcšű©i •UČŃ–céwłµ˙‘#G0ţ|$tEvAôFK«ż‡[˘nĽXňgn^˝Ű$R)Ôęp—xŹ-E,cXR†%%Áh4bă†?°níZüĽ|9ľYĽŘa> P(Đ&8¤U3â\-,8ąŤ€€|ňÉ'ří·ß°uëVhµZTTTŔĎĎť:u A0nÜ8D"†EMb»4ů!řřÔ=i™Łk‘ëűGµöÔŮZ?—é.ŘoËü¤µ¶u˝j)Îă§®këdáiűík—íěŢŃ~»đôąF˝çyL]®}¬Ł÷HD7F @ đŹĺłŕ@DDD-F*•bňäÉď~ř={őÂÁ°sLJcä¨Ń^™Ń… °}{&4۶aŰÖm(,ÔB(˘ű-· 9%ĂGŚ„żż??L€Äą°Ź?ţ«WŻFii)‡m7 !!Éă& G[Î0aĺú-öĺ{ĆFăńż:ž01Ş[g§ż†Mš+7¦ŚrÝöä‘Wî۱łÖ¶Ě]űě·‡ Ľ˝Ö¶.ťÂ°÷ŹĺČXůRż|ŻYŹ!€J‹–jźF-uąníZLš<[¶e`vľůî{´oßß,Y ř-=w „ÔĄK=.#±TŽF,Óśp{<űÔSXú㏠ƿ_~›·e૯áîńXlhÍá"<ňřÓHq‘KP8ÂČ…eeeaěرůňőEçÎťńŘObXR’×fÄQ Ţ"""–žž‘HˇPČ0 Ńh°vÝzĆ"§€KcQë8wî>úôS : ľľ<ÝŞKEE>ůč#¬Y˝ ĄĄĄčŇĄ+žzć 10íű1jÔhÜw˙ý «‰ÔÁrěÝťĄ\ŚčččV=Ľ”Č…IĄR©´´gĎś‚R&fDäTˇJ)üQĺ©KÜ×ßßqXl°Ůl0™L—Qqa>6nřŁÁ}ßź÷ľ^řNź>ŤĘĘJ>ü'žöěŰ[ł$íÁđÚÜW±ní~řn ŕ°g×—™x•%7""""""$"!|Vjµ î«Óép[\lŁÚő¤KΠX> IDAT $"!Ě5Ľĺşµk1iň<ýĚ3Pb˙ľ}óâ‹řfÉbôěŐ żĄ§ăµW_EęŇĄ9j4?€nŚ"""2|řpW&€ĽüscpŇH""×'‰ P(ŕďďďµ”••aö /Ř3č׿?¦ĎśO?ţ V‡cÎżţŤ©S&óăćXp """""r’úF-”—•!/?ß,ZŚ6*^ś3§Ií*•ź€ĺiż»}Fí;t€N§«UtéŐ»JJŠí?wčŘôú&µ®F×ŇîجŮ͢‹`ÁČ…\;JŁČ›čŤháŕć_µ˙‘#G0ţ|$tEvAôF‹Ëľ·Ŕ  ôë×˝{÷Á#=W_yożóNŁŻP(źů‹~uű~ľ{üxĽ÷Î0ďýŕăS3­``` *++íűś>u rąĽIíŞŐáĐ-řnĹL,8ѵšr ŵXś "r}Bˇ#GŤĆG~ŕµ<ň·G1sút<ňĐřŰŁŹˇÇ­·B.—Ăb±Ŕfłˇ°°oĽţn‰‰áĆͱŕ@DDDDDt3UWĂh0xíŰ߲y3rsŹŕä‰Řž™Yk[l÷+K9~úůü¬¸9\G)ą·[˘"BŢ|ý5ś>}şîT__tîÜŹ=ń$†%%1,7Ç‚ŃM$“Ëčµď˙ÜąsřčÓO1tč0řúň”Ô“±w‰\śÉdŠ+™™‰sçΡ¬¬ «WŻ|ôŃG‰‰iÖÜž"%%m;vĆ#Ó_懆ZM}«TX­V””c{ćv,OMĹ›˙ůŹ×fäďďŹÁ‡8,6Řl6ÍfHĄR~¨š »  ˙xęa$Ä÷cÁÓét1cNž#""°g÷î&=¦¨¸ËS—".<Čă?"‘*• ť:ujŇărD–f‹Wdä.8ÂČ…i4Ľđ 4hPťŰŁŁŁńâ‹/böěŮ ‹ÜžP(€R&Fnn®Gľ?›Í†˘˘"|őß˙ÂV]íµý\ߨ…ň˛2äĺçá›E‹ŃFĄÂ‹sćđ ps,8ą0ťN‡>}ú8ܧK—.0›Í ‹Č4v•ŠwÜÁ°®„~ýúŁwď>xäˇńę+ŻŕíwŢa0nŚ—Tą°   ěŮłÇá>YYY eXDäu˘ŁŁ±`ÁhąŐ5ű·őě‰ąŻżÁ¬‡P(ÄČQٱiÓF†áć8ÂČ…%$$ŕ­·ŢB^^ „öíۨYą˘¤¤[¶lÁwß}‡ &0,""ŕh•ŠŇ’ěÜą_}ů_čĘËíżÓ©ŐŐ¨0™›cÁč*&“ %ĹĹ(Ôj!–HŠ0µšÁń8k5=ô8€~ř?üđýţäädűí¨¨(Üwß}^ź•FŁÁÚuëś‚r~xČĄ…B„¶m‹ä”„©Őx÷ť˙`á˘Ĺ ¦÷Ý?î™4‰A4‘:XŽ˝»ł ”‹Í‚‘«Đ`ÝÚ5ĐëtµîS«1jôťP(• ‰ÇŮM€O>ůżýö¶nÝ ­V‹ŠŠ řůůˇS§N4hĆŤ‘HäőY•––âě™SPĘÄüŕ‘S…*ĄđG–§.Ĺ˝“§4»˝Řżßă2*.ĚÇĆ B –äpßĆÎs4mYLŞ)8ěٵÚ…°ŕ@ä*ŠŠÎá§e©un+Ôj±â×_p˙´Q g÷Lž©Tʰ®!•J1yňdLž<™aµ‰H_…ZmłÚąz• ±Xěq™+ (.*rJ{2ąüđą9¬[˝P}iy"@Pk{qq125OHdXD7hÓ† ·cďžÝ<ΨŐHEBřI„đ—ÖŚŃ–ęÝW!5j$E±Î“ĹÚ¬6tFs˝“áIEB„(ĄÍj¨ůF¬!•+Št¦m#T)…D$l°G}ÓŘ6śŃ7ŽÚpF߸[˙ŔéóFXmŐ^ý»¤±ßŢŹ5Ęk3r4ĎEII1¶gnÇňÔTĽůź˙đŹ DîÍd2ˇ¸¸¨ÎBĂĺ"„@ €¶ €'BDÍpް°Ţm<Î;qâ~ţůg:teee0 @›6mpë­·bâĉśx¬„>¨ĺčĐFVëţ ő>F_rÎźk°í^=o…Dć߬6bÚt…"¸]Ý'‰Ć‹()<Ú¬6ŕTnĂC»ĹR9‡G´hĹů0› ¶ă¨oۆ3úĆQÎčwë_Ŕq1jćqĆŔA0kö?šôX©DŠŽaa8täGçÓ¶m;Ś»űntęÜoĽ6 ľüŞŃŹW(•lŁÂIí~ŘXp r Wűş|ŇsµË?—3,˘¤-(hÔqć¨(á­öíۇţóź°Xj{Y^^Žňňr;v kÖ¬Á;ďĽvņŘđ@ű¨H$ Á¤±#ë}\Nv6r˛6ŘţĐaRĎ’ĄŤm#6®băâęý¶qCełÚ€eKKl#$4ÔáuŮÎhcăaنc;ę›Ć¶áŚľqÔ†3úĆťú·˛˛ …Źýýi|¶hvîÍľˇcRo´@s¸7˙Ҹż/Z-–-[†Řđ@ś8w†ĘŞV˙˝ŇRs„„†bŇ”©¸÷oł˝â÷sDDöěŢݤÇÄĆĹ"?¬z÷ ţcÁČ5¨ĂĂnż|r°Zč8»¬cXúƗ_~‰ŔŔ@L›6 ·Ýv üüüPQQ˛˛2ěßżß|ó ,X€?üĐŁł(--EII‰S'Áęęo/6tíˇIIP(ŤúG­ŁĽĆţøąm\>i.Wiَ‰ćnV®Ň7îÚżłźš†ĺ+~DzëZü÷‚ŃhDnn.”21„B˙hx€«çą°UW3Ü_ǰ0ś*,„@ ¸îŰ×Ë·{ÂDDŽŹ3 îQ<ÎęvěŘ1|üńǨ=ÔY.—C.—#,, >}şGç`41gÎŔ´iÓŕ`H}c)d"„úzöîí”U"ŞqďŘČÚ›ŤăÚÓ ®ÓŘy.ŢqĂrs>Ś€}çH$’Z†«…„„ňşr"'g€ÇYřúú"¬‘jµÚă—ĹÔ^53ü’%K ŃhšÝćĺI% řŮ#rş1#x˛H7î¶ž=1÷ő7„»˙;†ŐL0sĎäÉX·zŤ}ÉËşED`äč;ŹłV‡˘_ż~őîsđŕAÜzë­^•Ë’%K Y#~5EšŕH¸+‘Ó…ŞÚ0Ş“ŁU*JKJ°sçN|őĺˇ+/ç¤Č,8yČĹж¸Ú4h P\\ ‰DŚĐP„†¶e8D<ÎZÍ“O>‰ąsç"?? 
@űöí!‹a±XpîÜ9deeaÍš5xůĺ—˝.›k‹ ° DxçÓŻůÁ!jez˝úógˇ–sĺ j4ˇPжm‘ś’‚0µďľó,\´Á4¶Ä€±Ł† **Š"W¤çuäD<Î\ĆsĎ=Á€… báÂ…uîăăăGy6›­Öýëׯ÷ČL&Mš„´´4TTTÔ*:¨T*´ďĐzŁ…˘V¦Ó•#3#Ăc ÚTmńęě'Ůą2ź€IăF:Ą˝ŘżźÁŢ@?ôî{»S'XnÎá@DDäÂ.\¸p]!áZ6›­Á}©Hu°ééé ăÚImŢď]$tőč÷iłŮpöěY|0>Äbq“›™‘ui˙óřŚÜ G8ą°U«V5ů\ÎRXXyóćąDFٱÖĎjµ3gÎÄĽyójŤtđ•đCă¦.ĎZ_ßµÝäüý|ˇ–#-- ÉÉÉnýYn*WřěKEBX¬6XmŐ.‘ŃČQŁxP¸9\Xk ˘˘ąąą.›M]E‡AC¸´eSţ±˙÷§źĆ3Ď>çpßożY‚7_Ý+ŠŔ¤{&.^\ďę%uť,ůřř ((}űőĂcO<‰zǢJÝB•RD´W0VÔ9Ôţ2 ö-nń˘C}„B!‚1pĐ Ěšýv DDDä©Ú´i•Jĺ2Ż'88¸Öu©×¶lúˇJ)Št&v^#,KMĹOţ˝ŢeU«««ńĂ÷ß7ë9>űä|üч žd»ÂIxzÚ ČĺrŔĘ´´—K˝ú5 ěÝłŻĽôoLą÷,Zň úôíËY ˘ŁŁ±`Áô<ŢĄŢO}źç[˘"ëÜvŁŁ"ś~˛ď#€XôęҢEÝXp ""˘V0|řpW&|Ľüsc´Ä$‘ HIIqéĚ®-:\>iaѡ ˇ%ĹĹř}ÝZŚą«îˇë۶áÄńă …°Z­7ô<›6nt‹<¬V+V­\‰Ä!đűşuxĺŐąŤe$—Ë1đŽ;đŇ+ŻŕÉÇLJĽŹoľűž´FęŃ­=î4Gµçp4giE={÷FÁÉôZu¤yNIDDDnďrŃáňD’íUJŚ"‘1±±řţ»ďęÝçűďľEt÷î׍€Đëőxóő×4d0nŤŤÁŕń/ż„˛óçíűě޵ ·DEâŕÁjľÁ˝%*ŁGŽ€ŃhÄ łga@˙~¸-.¶Ööká…Ůłp{ôéŐ“&NÄęU«ěŰÍf3~ő%ĆĄ$ŁoŻžč×»î™0?-[Ö¤<¶gf˘¤¤#GŤÂČQŁqáÂlŢĽ©ÉąöîS3Şáŕü5ŇSMÂ˙;bă↠J¤2u*Ú…#¦3—­v7 ™gĎśBii©KĽŽp ""r!׎RđÔĄ-[˛čŔ‘ŤSUő˙ěÝy\TĺţđĎł0Ŕ 0̨ŔL( *¸/ h)îެ4S\JSoVÚćµn·[ÝşŐ˝mZYVö«[šĺ’Ą‰»Ąf™»€ ‚ 3l3l3 0ż?”U0f†ĎűőꕜóĚ™3ßsžç{žĄăƍǻ˗á÷ßĎ gĎşsdiµ8řóĎxüÉ'ńţ»ďÚ¶›L&<8{.^¸€7ŢzŃÆ!ţĐ!üăďĎâŘŃŁŘřÝ÷pwwÇŔAđ{Ú9[áTr úF„٤¤oüç?ČÉÉÁO>X~zZR\Ś™±±‹EX·ń[ČĺrĽüŇ‹XňÔ“°X,¸űž{đÚż˙Ťo7nŔ‚‡ĆÂGEQQ^üçóxń…˘¸¸sçÍkV<â¶n…T*ĹČQzx`{\ĆŚiŮ5+Ƹşşň&kf˛aDô`‡hÄIĺř51C#Łśţş%ŐI‡Š*+^yű\Đ^nVŚŠňs’śĚäQ;ŠĐř`űÖÍTY좇"{8‘Ó%šÓÓ᢮~ÁÓ1'𬍍Ŕ]wß @Pď< ëľůp÷=÷ÖŮţőÚŻzö,ć>4ă'L€——ĆO€ůý+ŇÓÓ±î›ú‡Ôô’0˘ ?źń%fΚ…™łg7xŽk×~…¬,-[ü8n»í6x{{㙿?‘H„Ťë×vlŻ^>ń‘G…§§'şté‚^| R}đĐC!$$„ÁifŇ!,, 'Ož¬ţYě†ü"3SŹ{'OÁ݉‰řţ»MxhŢüę†^\ ……rß}7”?ţ<@ŁŃÔŮ~[PPuńrÓ]ŻCC»7ëÜjŢK©T6XfŢüxwů2<öČBDEGcÄ‘=ť;wnv ¶o‹\.Gô°a¶m‘QQP(Řľmîť<ĄŢ×]?ç„ÜŰÁâÇGXŹúľK$P«50–ZęM6Ś?ˇŢn÷˝ÂĂmÉŻă'N@îăkK ęu:?q˘NyĄJ_Ą ß~ö6 %9ů†'ě#cFŐI,n\żî†cÔîéTűfłąz=´ą%íźdK3©ç!ş@íç€Ŕ@Ŕňضrzťű÷ýÔčgÜżď'čuşFăT['•]T `µë‰$er9Ľ}¸¤˝Â/w&¨>éééxć™gPQQaŰvřđaś˙ý žL*lđł4'N-JV3§ĂúuëÚdőŠ›]´©ř…GDBwěxűV8&¨>_ý5|}}ńÄO ""UUUHNNƇ~Ő«Wă•W^ašH6$&&Ú’ éWŚ J#ÜÝÝ1~Â|·i~9x^^^řýĚLŹŤ…DrcWrŤF;y R©ô–ž›ŹŻ/ô:ĘJKáŃHâ`ü„ ?aôz=öîŮĎ?ű?ěܱ..ĽłüÝFßc÷®ť°X,Řľsş×ŮwńâEL;»vîŔ¬ŮđfiĄGćÜŹČ˝!–8Î 2b‰•-l2ÝLRˇĄŻ)1U`päz,j’}¬śHĎąĄď'  aµZa±XXçp ""˛3IIIX˛d † OOOČd2DEEaéŇĄ8ÍĄöl¸îťR=d`çŽŘúĂŔäz†S@đŐa=şśś[~^]:w©~Żëş~7D©Tbć¬Yřü_ţüs“ŻŮ¶5ţţţ7$ ((Řľmo’VŞYŤÂ‘’ ·ZnnîMż¶˛Ę _?ĺź~Î5I‡G.hÖD’ ů=í\“˙8ř‹mXWŻđp|úŮçĽip ""˘Ö2 ¨olsŻ^(**b€lhs †FŁÁá_qđ矌ľ}űŐ[věŘq€=W'ʬ‘ś”„‘w Ç«×őŔ©QYYŮâó~gőÜß~;lŰ–ťťŤ>á˝0}ęTŔSO<ŽČۇŕŇĄK¶2Ţ>>uŢ»!—/_ƱcGUkî†ë 6 'Oś@vVo”› ’Kp×đޱô埡 ?ÖŻĂÜŔťĂ˘ň3%řúx7{őŠ–*,(ŔŰoľ‰qcFă×_Ĺ›ożMßoĆđ;îŕ ä€8¤‚ČÎX­Öz»˛KÚáÉ`ZZ¶ŮŃÓÝŔŔ@ôëׯŃd€f%<Än0—CŻÓuč•*jÜ;y VĽ˙`éłĎ6XnƬYŘşő|üŃJ‡„`Řđá8—–†çž}999uV”Ťć6ś?źăµV‘h®śÍß}‡OW­ÂŕÁCŕëë‹·Ţř/, îź6 Ń»wíÂ^{ ˙~íUxzxâăŹVf?đ`ŁÇ߫Պčč†ŃÆaĂúőŘľm~ä~Aµ@Jr2BşČ0hŕ‡ý Zm&ä(BkŇ—………řqď^ěܱż>ŚŠŠ ôéÓ福éę"@~ž@űÍéă!uÇsO<„Ź?ů It­>^II Ö|ů%ţ÷ůgđôôÄó˙|S¦NmÖŠ6Ä„9 ´´4¤ĄĄŮŐ9˝đ P«Őő&5ŘťpŞYÇ Ry"7+ű÷™›ś`­#¸gňd|řÁ Ü}Ď˝ –‹ĹXłök|ňńGxëŤ˙âé'ź€H$BxD–,}1ŁęNF÷ü /ŕĄ_ŔĽąsZ|Nrąk׭Dz·ßÂ̱°X,čÖ­ŢYľÜ¶„傇†»Ô›6~‹‰ăÇC@­ÖŕĹ—ţ…Ř™3=~\ÜV¸¸¸ 2*ŞÁ2C#Łŕćć†mŰâph .`Ď®ťÓä$ŽÎČh4â§÷b×ÎťHLH€ĹbH$BTt4bFŤB̨э®ľŇdc_â†# 1,ęövýśî¨|<ĐĂâ ciůMŁĽĽëľţź®úUV+[Ľ3g͆X,fEş ÚÜÜ=nşwďnçĂ„9”šU®O6DFF˘WźřöÇŁ Rę› Îßß)gS›UŢÓÓKźyKźy¶É÷Š6 ?í?pSç]ştirâÇYł¸©I·nŰŢd///$ťů˝YçzłźŃUUUB  óŇ%ô0Đá?ŹŢ`‚ŹÂscďn˛ěŁ FüˇC°X,Ëĺ?abFŤĆđ;‡‡S]çôôtCo(±Ô‚ţ}ű`d̰f˝¶˘˘[ľ˙­üŁsćÎĹĽů š\Y†šN8 t;ÂÂÂp ""˘úŤ=şĹű~üńÇ6=‡ĐĐP,]şÔ.⑚šŠĺË—×Ův}˛aîÜąříxo"; ‰`µZ‘ť•…‹™—¤ńwčĎc˛T˘®P«5M–=°?TŞNxtŃ"L˝˙ţ&çi.˝N‡ű÷!\ămKýjµ™Řłk'ŇŻˇ3b©gł†¨íŘľ¬x—łł;c>ú|}}[}N)ÉÉ8räÂ5ŢHÉ,dEd¨ĺęK6‘ý1›ÍxůťOđňŇG2é K`±¶lžýáwÜÄ„ĽňŻ—°ňQŚŠjŐ\<&ł YZ-äR´h˙„CÜŹ‰HÎ,€±´ĺKVţíé§Bˇë×­ĂWkÖŔjµ6ůş¦z  äĺB.±ň1á@DDDőiëž ÎfăĆŤČşşb“ Döݤ´ o®řQ˝»bÚôéµ4¦RĄB© eĂ >ýěóę‰"÷ěÁÎť;°ĺűď±iăF¸»»#2* #cFaÄČ‘đóó»é󪬴ÂÇׯÝâňѰ?ľőĂ×, +DDDDöÉ"ű—«××ůąŔX mö¬ýę+Ě~ŕ‡J:Ü oooLť6 S§MCA~>öîÝť;vŕç°ď§źŕââ‚Ţ}ú`ýĆooęř%ć ‰ľóO˙\)ÉÉŘwč7<ú{«ŽÓ‘ć3aÂČÁ´E˛ˇÄ\ą‡EF#JÔĆt99ŐőĚT¨¬˛âôĹ|ôM:diµŞWC©=€Ńh„Ń`¨SV&—C&“ÝđÚÚk­hc6™ 
ż.rý1ô:Ěfs›ĆÂÇ×Ó¦ÇbÚôXäĺĺaĎîÝصsŽ9Ňęc×wľJĄ˛NlŻŹKS±˝ţőµ“ 5s61á@tL&rőzdiµ‰ĹP©Tu~Q뵏Ú]ŹJ6äĺĺáŹ+ŮI…ÍS\j®°ý±}âř1§IźČču:¤§§ µ–J¬ťtضm;î›zß ŻývĂzŐ“NľůÖ[¶í»vžĆîÝ»ë”7nĆO÷Ô‹Łč†ăŕeű·+*ŕ‰˛şż“ ‚×–_”ZK TŐ)s1ł¨Eźż¬¬ ®X];w //]»vâÇÇč1c P(°kçŚ7ËŢ}ŻUqţrýđ“VÝpľĹpGe­fŢőq±X]ę Ă ®]§ˇ‘QŚŽn0ŮP3A$D- ÍĚĞݻnČžŞŐ7~dr9DÄzFíDˇP`Ú´i(--ŤI“ę-“€í۶!Bㄳş&Ź©3ĐĹG ‰ 9›9Q[ٸa=ĘÍfTVYˇż®qZ“tČĐťÂŔ!‘ N$Y^^©»»íç)S¦`Ę”) ľgĘŮŚ†jz›|&™T‰ ż&&`hdTŁeß]öľZłĆöóŮłżă©'ÇÚoÖˇ_˙ţH:}ż> ?ĄĆŽÓçtA{ÚřÚ©ľÉ„Þݻ1mz,?ţ8äŢŢ8uň$žî9¬Yý%úőmĂ«ŻĽ‚ ë×·*áp+\ÔßpŻ7w5 ąTcŢHINj2áźźŹgÍDFFÝDQQQ’’N#)é46˙ľ^żJĄ’żDöěܶĺxAťýz˝‰ ńŚŠf°nŇ}ű«×ëqâř1Ö3úS•+~Ĺô+żŕȉ îŽđîÁđ^{˛šßäq||ý JÜä·#(ČĎmň82ßηôé©g‘ÖôdoU®đőS¶ęÁÝ{˘¨´˘ţĆF®Gă[uڶş6Žv}ŰâÚ4vŚÖ\›”Ô ¤¤e ĺl|˝ÄČ/j|.„’Ň2ĽşüLÖwÝuÄ ‚Ă #ĺ¸CŻŕ™ż˙Ýöt~đ!xúoK°ňjµĎ˙óĚťn÷źĄ­VُŢ{Ë—#7/Ď>÷† żH$0›Í¸|9‡~9„O>ţĽ˙>ţýÚküeĹ„‘cÓëuő&j’ÚĚL6„Zˇ±§n¬gdtąůŘľ÷lßűKťíQ=TMľö˘ö2¶żýqűĂŐŢ{4˝.üżnń1Ô~Pű5˝Äß~hđifsŹq á(ľÚúsý o©źVŁ­®ŤŁ]ß¶¸6ŤŁ­®MSɆkIRÓŇQxu"IgĐĹߡÎp€ţ"7÷Ú„•ţv=amJr2$žŔçoÉńŘŹ÷W|€Ű‡­łÝÝÝÁÁ!Ahhţń÷çřˉ "ǦÍĚĽˇŃS[ÍĎ×/ďDDm_ĎšÓčĎÖśů šüă][hÇĐć–@›[ŇîÇ0–ZÚ$®ör gşľmumšëúŐ+ĽUhsKđčÜć=ýĎËËCbb"Ô~ĐL0Y*Űý;ăŢÉ“ńÎ[obŮ»ďÁĹĹ@ő2™µ{ů]ÎΆ‡‡G‹Ž«VkđôŇgp˙‚gnŮą«äŘ·'Ž»Ą«Qäç#˘wďFËôíŰů-:ndt4d~ťńňŰźđ——ťpa¨ŁSk474zj«fáÇńcDmRĎČ`Q“<ÄnÍz’Oގ&éŁĎĂĹôč ¦fOv—››‹¸¸8¨ý< ÚGÓfţ‚ż˘˛˛ óš‹řC‡`4!‰`±XPUU…ĚĚLĽţÚ«čŮ«—Ý] ĄLbK6ÜĘŐ( ’“’-“šš 9'“vxěá@tµ‘“ť•Őč“×ć6¨ńz&ę­k¬gDÔˇ+úvSV+Än.H˙ŁAq˘¤żŻÔ.z)´ĆÁźFZZ*.]Ľ_ëě ďfű÷ĘŹíď)ĽŢh•²f‰ąY#cb°ä©'ńč˘EކÎ]ş@,Ăb± ''‡±ňĂÍa–L89ń&bíšŐ Nh§TŞ8®ś¨ ëY}ÉÖ3"jގűcÄp¨T*¬űúk`ŇÁ‰’­bţóÚ«¸|ůrýŤ/77ááGEĚ-Xy§µne݆Ú?ń$đú«Ż6XF©Rá‰'źbĹ`ÂČńÉärLť>{vî˛M Y#8$cÇO`XϨťŤ„Eó®Ťíź1kĆFúö ŮŤśś¬Xą#GĆŔÍŤÍ­ú( lÚĽ_­^ŤýűöáÂ…ó(++X"Z­Ć°áĂ1oţřúú2XL89•ŞfĎ™mf&ôz=Äb”*TŞN ë9€îÝ»Ł˙ŔÁŘşűád<ÄnÔŻWťdPýôľÉ“ŕ«ęŚŹľŘČ@‘]đôôÄťwŽ`˛ˇqztŃ"<şhц %ĺč…BÁ„‘=Rk4GNÄzF(,, †’r¬üz;áD<ÄnčŰMđ`˙ËŚŚ ŘôýÖ?­K8u°F\i9”VŚÔtY}#Âoę}~O;çĐ1ęĺŰ á˝ŰěÇŽĹŔAx¶@жK–LÁ˝íâ|¸JŮĄšdCŻ^˝02¦ńńî{‡!,Đ!ť˝8ćę"€L*DZZš]ť—±ÔDlÇI ő:ľÝ°áo»ĽvĆR dŠÎŹhłcΞ9ŁeŤíädI8h·1ęŘĂěNídø ›,/“É0cÖ,N$éč×]↍–-[†U«V9äg¸U˝Lf˛´ZČĄ"‡żÎń‡`ż†K—.ˇ˘˘˘Ń˛Óî»#FŽÄc‹7y\ŁÁ€‚Ľ\§‘ł`""""˛;ôÁС‘ÍJ6ÔPŞT1kş(ĽŘÓČŽ˝ôâ ČČČh2ŮII§ńÁŠ÷4ĹDDDDdW®_Ť˘%j’\˝˘cP«ŐX˛d ć=ő"JLvqNFŁożő&řyyą¨¬¬l°¬#ĎŮĐşś¬Yű5 WW×ËőěÚacä,p """"»ĐĐj-ĹŐ+:©Tа°0K-vsNŻżúolýá^śFxűř WxxŁÉ†šëKŽŤ """r yyyřăJ6dRˇ]5>¨yšłEKŐ¬^±vĂfŢô§ůĺŕAĚś5ó,@çÎť›lTwDąz=őďע×ôě ěńŕ`8‡9…„„lßşĂÁ´d5Š–ŠŘ}»*9§µŠDč 7T"K«m˛¬ąĽO˙íočPɉĐć˛bču:Ţ0í(އ źŻúqqqvq>ěá@DDDDí¦&Ůŕçç‡đÖŻŻ×é`6›¨VÄ &O˝›6l@E•uĹ :µR.§  ßnXʧ—>ÓhŮ~ýú!==ýúőëp1ĘÍĘŔţ}fL‹m|9Ë›éĄŔž މ """"j×F ¬VäęőřvĂz¬×z"ô¸ÍÇŹ…L&köńvîŘĽ\}ťă¸˘žŔO&aÂną_úţůü?;c&† …ź\\رś:&&¨Ý\Ô_—¸Ö[.(‚ÁPآ„CU•ÚÜhsŮ­›ÚÇÂż.@~~>ž]ú·&Ëň©}ă>ţh%}láŔj#"""""»Pn©‚6·wÝuWłĘ—––"-- 2©®.»ř ™™™(.nűž4jµO/} g;N2mĹ{ﵨ|dt4ĆNšŇˇbdďŘĂě‚ÉR mn &MšÔ¬ňZ­Ë—/G„ĆÉ™v±ɱ“§ ‰ŕćƦVCjVśhŽýúB$ŁgŹžxéĺ—Ôµ+č@ŘĂśĆč;ˇÍ-a ¨ÝHĄR&ÚPii) €<÷wÄÁ°&‘]ňňíąÜ› ‡RVV†W¬Ŕ®ť;——‡®]»aŃăŹcô1€9ĚƸqă1söěŁćÎ]Ńł{(~O;‡ňňrěÚą/<˙Ţ`† """"j7j?tQÔť˛Ě*F%\ÖłEF€»»ýÂÔˇ˘Îör¸áś6%Ąe :ÝRď.{_­YcűůěŮßńÔŹcí7ëĐŻ$ť>Ťß†źŇcÇŤgŔšA$aÜřńxůĄ Ă„9…îÝ»Ł˙ŔÁŘşűá@ ĄĺR{`pż۶đŢ˝[śh¨ŃU€cďEV¦¶Îö\c)N¦îaŔé–Űł{7¦MŹĹâLJÜۧNžÄóĎ=‡5«żDżţýńömxő•W°aýú›phÉ5úőŽŕÍŐśďÔ’rt‚Bˇ°‹óáDDDäÂÂÂ0`Đíżď`ŚĄür"ÇSłŤČčč›N6ÔP«5¶c…„†â˛>kľc˛ZŃ+-‡É*ÂĐȨ&Ëŕ™ż˙J• "‘‡ ÁÓ[‚Ôł©¶űóůľ€3gÎ8]ŚĽ|;!<˘7ov”˘-Ä]÷LATT”]ś{8Q»;pVk&Ü9Á!!mrL˝N‡oľ^‹?ň™„˘Ö1–Z`‚‘ŃŃM–íâďÁOOO۶ţ"7WoűŮ? 
EFcËÎÁhÄ™”d¨ý<ě2±j,µ@¦čŚđ¦{"4w‡–ľF«ÍÄů´łvŁŽ=Č.$%§`ë–ÍHINnÓdCú#ë dR!˘z¨°páB‡ý ÷NžŚwŢzUUU¶mŢŢŢ0›Í¶ź/ggĂĂĂŁEÇ5 ‘µźo”dej‘žz†1˛#L8‘]Č/2#ýŠ{vílUŇÁh42Ů@ífţ‚ż˘˛˛ óš‹řC‡`4!‰`±XPUU…ĚĚLĽţÚ«čŮ«W‡ŽSyy9Ö¬^ŤY3b1tđ ôîŐ·Ři÷ăź“ÉÄ›É pHŮ ťáj#c×NhV÷ěëONCZVÁµc‘ÓňóóĂ]wÝ…ŹżÜ€rK•]śÓÁźFZZ*.]Ľ_ëě ďfű÷ĘŹ?é°×­¤¸sç<䤤:ŰŤF#Nť<‰S'Ob[\Vµ^^^ĽŃ{8‘]ŃLHżbÄţÄ-~턣Xůż L6t …“&M‚6·&KĄ]śÓ^{—.^¬wź››BBBđÖ;Ë3jT‡˝nüŇĎťĂSK– nÇN=qpäř |·ĺ,|äQśĎČŔ‡¬ŕMîŕŘè“É„\˝YZ-Db1ŐP©:10D·°ž©T*ŞŐ ÝtĐ> ë,š7˝ÉňzťGOťÁ˙6lgđ¨]ĺää`ĹĘ•92nnlnŐgďîÝxgů»5ztťížžžčŐ«zőę…={âí·ŢÄ?ž˙'Ć„‘ăÓffbĎî]0 u¶‡„„bĚřńH$ Ń-Şgj5ĆŤź™\Î ŃM‹‹‹Ă¶mŰŐC…„ł:ÄIH8 ¸{L$ÔjMɆoľ^‹,}FíÎÓÓwŢ9˘ÉdñŁG1pĐ Ł+W®`Řđፖ‰6 Ď.ýo¨ ×xcűÖͨ0ŮĹŇRA@§ËAÜ[nh@zú9lÚ°žA"ş…ő,K«ĹÖ-›$"Ş×ń“§°iÆz'’¬˝ĹE]1E·„ÚĎŢ‚bĽűÎŰM–ýőČQD˘&ËÍž9Ăéb”}î6®_×dY‘H„ôsŤ/s™_Ţ|-$—ŠđÇ•läĺĺŮĹů°‡€=;wÁl6ĂjµŔjµB @Ż×#1!‘QŃ ŃM:°o_˝őĚÖh`=#˘K-Ő«M\7‘$WŁ {čţűúk¸té***-;íľű0bäH<¶xq‡ŠQDďŢxîŮgđß7ßBDďŢ¶íĄĄĄ¸té"ěߏ˙[µ “7”c"z˝Î֪ݶƑ63“"j…쬬:u«6Ö3"jJÍD’µ—̬YŤ‚ɲ'/˝ř222šL6@RŇi|°âýŁyó ==Ź/z¬ÎöýúbĘ=÷`Ĺ{ďˇK<ůôÓĽˇ{8P‡W»SÓŁˇ¶šźsőz‹¨ ęY}jęYí¤Q}I&ž€ŢhÂĘ˙m`PśLeĄ†’r ęެňĄĄĄČĘĘ‚L*D‰©•UÖöżOsr°fí×0p \]],׳{(~O;×ě㪔*Lť>/żířËiŢqçťřĎo ©Ö˛BˇJĄţţ3±3fÂĂĂŁEÇ ďÝpĹ—¶˛21á@dÔÍ ŤžÚj’~J%EÔő¬1 5™tĐ>Ťźźf0śP‰ą)ÚB¬_»´YĺµZ-–/_ŽŤ’3 `,µ´űgđöńAŻđđF“ •J[t\±DµZcź±-Lžr&OąĎöóé”3­>¦L&ŻźŇibÄ„‘“ DvVVŁ=šŰ`"˘ĆëYÍPĄú|¬gDDäč~‰O@YY>˙ě˙đăŢ˝8ź‘’’HĄRuĹČ<0gŽť<Ĺ`5˘ŞŞ ĺĺĺ\)Ž "Ç7"&›6l€Ůl®wżR©âDvD­4~ÂD¬]łfłąŢdë‘ósuŔCâ±Ü\ëN%&“ m˙ÖL¶áő ×x7ú>5]뵹% :ýéňóóńଙČČȨł˝¨¨II§‘”t›ż˙_ŻßeíAŰł{hłË¶dŘ 1á@d—TŞNtϽؽk'ŠŚu'ž  ÁŘń$˘V’Éĺ:}:öěÜe›¨•őŚČńI„® ] şB,t…ˇ´ĽÁîĚ7Dh|š}n>¶mÝ‚ÂüthťíîîîAppBCCđŹż?×acÔPŻ…Â‚śK?‡5_| _…Ď=˙Τž·íó÷•ÂŐE€b“ećJ,•ĽQś”ŮR ‹ŐAj˙&Ëäç#˘wďFËôíŰůN#‘ÄJ•ꦏáíăÁ‡`Ŕ€˙Đ\ĽňŻ፷Ţâ Ř guřüÝcČ€ŢL8Qűru@&Bá)†Îh˛=ÍďÖ ]ŐţRűCéç‹ŰđчŘz$ČĺrČdr¨:u‚H,‚J©‚¸“»µ6ŮĐVÂ#"ܧŐV/囕©Ĺ2 nY}n>.d^Ć‘„(7•Ôip•*lĂJ8$Ăyč &řu–bZěŚ&Ë* $'%ÝĐá¶ÔÔTČĺňťŃhÄ™”d¨ý<ěr~ťÁ„;Ő!ŤöNjö÷’«+ĆŽŹďż×˘×iµ™8źvÖncÔ1á@DDDÔÁřxŠ —Š “ ë ) 銑ŁbĐUPďë[üxł’ ŽN­ÖÔů˙ő”~ľPúůâbZ˛ł®5jÄW‡™Ô Í0[*a()Ç]1*«¬ĽńšA&"Bă… bŐŞUůFĆÄ`ÉSOâŃE‹= ť»tX,†ĹbANN'&bĺ‡ *şe% …HŚŹď8Ťi«eĄĄ-zIV¦é©gp`Âţě$ÂS _/ń +DŐs)tŐř7lĐ!’ -Qó´[ŻÓA§Ó!K› m¦EEF[Bĺë‰+ĆŠ:V’s[üÄ“HLHŔ믾Ú`ĄJ…'ž|ŠÁjÄĚŮł1sölÂÁ1á@DDDää$BWô ôľ!Á¨†ć6 Ő­wÝŃ)U*(U*Ű0 ŁŃmf&˛´™ÉäxzI4.hłńsüQ9yşÜ|­ŤHĄR„††âčÉTVÚG/…BM›·ŕ«Ő«±ß>\¸peeeK$P«Ő6|8ćÍ____^@bÂ×ŕţáŇ/ůW.áŹ+—‚Đî apn™L†đ:óDtU klćĆŢ ÚlěŘ{ÇO§ ČO‚? 
Ę 3¸› V«±téRôľs˛]ť—§§']´Ź.ZÄ‹DDDDDNhDÔ L»{ ”~ŐOQŤĆ0»™°±ŁëŞŔ˘yÓqâřqŘ÷BşmcΙx說ŞP^^ ‡)‘“cÂśB\\¶mۆ¨*$śŐu¨Ď.ş"H剋şbŚş3÷ß=R÷:el°?2™ ^^2!ş"¤‹ j?\Đ#żČĚ9¨žÝC›]ö÷´s µ©pŤ7¶oÝŚ S˘˘˘Úý|\xI—Ż—}‚|ŕë%Ć=#`ně=7$Č>‡„`ÁÂ…;~ĽĽŞBbˇ+zČŃŇ×Đž¨ý<ŕ-(Ć»ďĽÝęc …B( ÜvŰmNŁěs§°qý:Ţ0íH.áŹ+ŮČËËł‹óa""""ŇŮ *ďk S…ź‚Aq@5ó=¤$'ăçýű`6›Ńµ“<Än\RÓ5Ôkˇ° çŇĎaÍ_ÂWˇŔsĎ?Ď`‘Óc""""T;Ůŕ§Tbös02făŔÂ#"0{Î\ř)••·;şvňb`ś„·Ź‚÷>ř—.]Ä+˙úBNŹ """"Ł’Kę$¦MŹĺ˛–NB&“aÚôXô ‡ŻÂUnęóWVZa()Ghh¨Ó~FWWWŚ7ěoŃëäro ŤŚ‚6·„Ą5şuďÁŮ&©±â"€ÚŻşęĺUÝ8s¦{§"–H0nÂDÄÎGćMďPź˝Ä\m!–.]Ú¬ň©©©X¸p!˘z¨ “ çZ­(+-mŃKd2"ŁŁŮn„Z­AHX/ĆČŽ0á@DDDä@^b…®€q'0ŮŕÄÄ şŞ02zádfΞŤSÉ) 9=&©z5Ů ‹ˇVk`pżDDDDÄŐE±»'z†‡3@Jr2RNţ†pŤ7AD‡ """"â!vą¬zťŽÁčR’“P— peL"r@L8‘S4ić/\Ś„łl“sĐëtČÎĘP=™"µłĄ« ŚFb$’xpĹśv–śY€‰“&#**Ę.· """"GýßdbO'ľ¶»vîxJÜ”v¤3P)¦ĹÎh×{"K«µŰŐ8t”ꌌŐnç`4Q—ëX+–´u J-čâ…BaçĂ„‘ŞŞŞÂĆ ë±vÍj¤$'3 NÄl2aă†őČŐëV«2©¨C|v±Â5ŢX¶lo„ëôzľÝ°Ł)II8’p1˛#L89 ‹Ĺ‚"ٰg×Něľú4ś[Fz:>űżOmÉ?Ą˛C}~WWäRŇŇŇx39&X,ĆýÓcm Ň3))řüÓU8“’Âŕ8¨Äřxlݲĺf3 ß€ e`!•J CI9*+9ł&‘˝aÂČA)U*L›k›ČÎh4b÷ÎL<8¨Đęä‚—— S§Oo×±đŽB­VcéŇĄHŃrbM";ÄŮgX"Á´ŘHINFb|<ŠŠŚ¶ÄCrŇévťäŽZF©Ráî{'#00b‰„!"‡ÇDDDä°}ëf„kĽ;äçŹŔ‚… 1vüxyÉrąś7†ŃëtMö<  a˛nšÚĎ'ŽAjjŞ]ś{8‘SČËËĂW˛!wňŮü/ęŠ1jŘ`ŚŚVďţđ„GD %9jŤ¦ÁăM&6l˙zť)ÉÉČH?ăŐI>ŐjČd2Ǩäx˘ ßnXŹű§Ç2 ÄHź•Žýű\9¨ťÇʆg%ÂÂÂp """˘ć+1W@,ő„RĄj´\xDDŁűÓÓÓ±g×NŞŐPk4 T#P­f€Ű ÁĄŐB«Í„V«µMY›63łÉëCöE,t…› YZ-ŃHŚĘËJ ×é ˛aÂnZÍnVżx…R©„X,á”Äh4Âh0@ŻÓÁ\nf@€L&ç“KréçÎŮ~ź×nDŞŐP*UPkÔPŞ:ń~nÇŹáŔľ}őîë‚ĐP„pČ1á@T?­6g’“‘žž^oĆžH,†Z­F˙ˇVk;L2?z´N÷["˘¶ •J…ôôsČŐëmŰk'ŽĚ~pN“˝):˝NŁŃĐh&0đZBßËK†ŕĐh4·µjČČčhČü:ăĺ·?áMŰÁÉĺŢ…oăö2 Ô¨Ń-Ż~N8Ć`0á@ŽöKöŔţ}ěFć$ĘÍfd¤§##=j5†FE1ń`Ě&Nś8ŽÄřxn9ĄJĄJ…ČčhM&deeA—“­6ąz=ĚW,4–lŘżď'ŤPuę±H ĄJ±Xě Ššefł z˝¦2ôzŚCťäďô0°ÁŽ?jŤ†ČÝÁ’ IDAT=Cnö:”ZpV‡¤ź77«ĽV«ĹĆŤ®ńĆĹśb§^S&“!2:ËżŘÂĄjµĆR Ön=Č`0á@Ž"%9{víĽ¶A(†K@\şC  „@ÄnŽÂZn‚Uź…JÝ%Te§ĄFdiµŘ´aĆŽźŔń¤íHŻÓa×Îuž2şř‡Ŕ%0.Ţ*ĽůtŃ‘Té2a9° ‡!–H‚ŕD"Úö˝d2›šüîĘÎĘBFzz˝űý”JH$’F'Ů«ičËäm3ÉlŞNÔŮv5‰Ň`2DŻÓaë–¦ą………ŤîçďŇ?Wii)ŇŇŇ —Šŕę*`@p Gr`ß>[—JĹp ×î™dpP‘‚€¸„ŔŤĘ´c¨á 7šŕĺŰ á˝yщţ$*ąž(Ă·Ö3ŤÄHź•Žýű~b0ÚQ„ĆŰ·nFBB‚]śDDDDDg0A¦čŚđčO"şÂMP‰,­–Áh$Fĺe%Đë8]Ă„µ9&ěTxďŢ9É™ F'KCI9Ń™\o_cdGp """"˛×”L_?e‡Yň±ÄTäĚ,Y˛¤YĺµZ-–-[†pŤ7<ÄnNĄJ…i±3˘-dĹh@xD†DßÉŮ&Č.TVYa,µ ,,¬YĺKKK‘––ąTWWHdgp """"""˘6Ç„‘ń»Á\VĚ™ŕ‰Čî1á@DDDä@‚TžČÍĘŕZ÷DDd÷p """§…‰“&s6""ę°´ą%č?p0şwďnçĂ„9…B.ţf6Ş«g÷Pçlí,Ô~ĽČíHo0ˇŘꎩӧ3ŤÄČ/ #cF1íůť‘[‚noöÄ«·DDDDÔ& ţMß~Űh™÷îmqb`ţCs›Uî÷´sN׬L-ŇSĎt„Dč µź¶mŰfWçe˛T˘®P«5í×ÔfâÝwŢFT•]^;“Ąb©'”Şö;żÄřxě‰űŢncÔ1á@DDDD­& ńŐšŐ¨¨¨¨wżŐjĹg˙÷)D"Q‹Ž›y)“Áí@DB¨ý<Ç`9&¨M‡ŕ‡-[ęÝ÷ăŢ=P©:ˇ˛˛ň†}[6Ź{ďž„ţ}zcüŘ1زů{@ßpdeiŃł{(ţűúk€ňňr ŹŠÄńcÇ0~Ěhô ď ţ!›ż˙#†ĂŔţýđ×ůópéŇ%Űľ çĎcÁĽ‡0dŕ ęßsŤÔłgy‰p """"{RQQ)÷݇UtC/«ŐŠŹ>\‰ű¦N˝!á°ß>Ľ»l9žyöď˙ő0^}íu¬xď=üšSÉ)އKüăź/¨îIa2™°fő—řě_ŕ艓őžĎO?ţ ëÖáËŻÖ">ńWŚ3Ď=űŚm˙sĎ>‹QŁÇŕçCńřů—C5z ţőŇ‹Ľ&,, «V­BÂYço!bÂś‘ŐjŰáĂá.•bë?ÔŮ·wĎnTVUâŽ;ďĽáu«żüĎ=˙<˘‡ T*Ĺŕ!C°dé3Xżî›zßG  ¸¸Óbc¨V78Dă«5«ńôß–"((‰Ócg`݆Ť¶ýé¸űî»áîîOO<8gÖoü–’ """˘şRSSqâŘÎćßÎüőá:˝¬V+>Zą}x!Á ĺϤ¤ 2*ŞÎ¶Ű‡EŇéÓŤľOďŢ}Ýź–šŠ={6¸řwŕÍ7ŢŔw›6áĘ•+ĽpDädR!ţ¸’ŤĽĽ<»8&Č)¤ĄĄářŃĂNźpĐMđňí„đŢvy~&NDeUâ¶V÷rŘł{7Š‹‹1aâÄzË—––"rČ`ôějűďŽč(äćć6ú>^^^Ťî/..†‡GĂ÷ÂŢxť;wĆ˙}ş ŁF܉‡ĚGf&'¨¤†qA9~ML`0‰QQ~R’“ŚvˇńÁö­›‘`÷*DDDDDg0A¦čŚđ»ţUUUřřŁ•7ÜÜÜę-ďéĺ…#ÇOŕ÷´suţ«™żáfÉ˝˝QRRŇŕ~www<¶x1víŮ‹Ý?ţ§ž^‚g˙¶„7Ő?IE ‰ńń F#12ćý”ä$p """˘[cĘÔ©(**Â˙>ű yąą¸oęÔËöîÝÇŹkósčÖ­Nť<٬˛jµÁ!!8wî/QbÂÚ”D"ÁěÄĘ?ŔsćB,7XöÁ9sńÚż_Á‘ß~Éd‚.'ď-_ŽG~Đ©S'¤$'٬¬¬Eçđׇbů˛wťť “É„M7bę”ɶýĚś‰Ż×~…’âbÍflXż˝űô±»X†÷îŤA‘Ă‘śYŔ«Ł×+±0””3 ÉĺđöU0FL8µ ˇ‹ĎŐ@ä"h×óx¬ż˙ ŰŢ‚ž iŻY1*„X×XĎśÖĚYłáííŤŘ™3-7üŽ;đÔ’żáŐż‚!`Ň_&ââĹ xńĄ—Ż˙÷ <öČ#=rD‹ŢŘđáűĐC1}˘‡ŢŽŰ·ăíw–Ůö?÷üóŘľm†GGaxT$âńúß°ż”L_?e‡Yň±ÄTäĚ,YŇĽá-yyyضmÔ~]ť:6J• Óbg E[Č/„GD`HôťŚ‘qcČ‘EúË ruÁ°@9öe¶ß‹ŇýĆ%ą,•VŚďę‹ě"3Śĺ•ĽXÄşĆzFäÔ~O«;A.—c˙Á_š,˙ňLüË_ę=nô°ařůС&ŹQß¶{'OÁ˝“§4Ř0ůfý^8;SYe…±Ô‚°°°f•ĎÍÍE\\Ô~0”–Ădáď"&Ú€‹ąÍ_Ąü9ťqPk@…ŐzCąľ*Üâo±2Ťf|›ŞĂß«±tjJéâ…Ť”R! 
MŘ}1ż])‚«xeXW|‘t÷‡© ” qĄ¸ßüžËĹŐ]µ–Ź †›‹+F…ŕ€¶ß§UĎŞ-›RőŢ ź¸ +/uđşÖP=Đd]c=#"""bÂčOs{ţ()GFˇ ŮĹĺ¸Ý_†řlCť2ýTžřK°_&ý]i9Âý<0Żw¸ąl “?L öĂ×grpľ° ™DtB©ç Ęŕî悿+°:ůä•Y0:Č÷uWâăŮ€%ű3°bTžř)˝nĺr@[dĆďyĄĐÍ;Îçó˘Q‡­kMŐłJ+­k¬gDDDDއs8Cä/UO ôăĹ|ŚľÍ×/ˇńĆ÷izh‹Ě0WZq<§µ…¨]läŐ2góKQ^eEza¶žËĂđ@9€ę±ëŰ3ňpĄ¤ĺUVĐ"ĐKܬs€ý™ŐĺC}Üyá¨ĂÖµ¦ęŮÍÖ5Ö3ęÂŐŢČ>w ׯc0 ˘¶6 “J,UH/¨ž±:ŁĐcyvňŞSNé.ÄE©Î¶Súâ:?z‰‘š_Zg[ZA)n“Il?gͶ—ZŞŕîÖ˛ŞóÍî ő§“OfD¬k ŐµćÔłÖÖ5Ö3ŠŠŠÂÄI“9›?uXÚÜô8Ý»w·‹óá rHc»ú`ÇůĽ:Űö\ĚÇ=!~8ňG‘m›‡Đ¦ŠŞ:劮›XNěę‚7îěvĂ{TT] n©jÝČđbK%¶śËĹáťđńÉËĽ€ÔáęZsęYkëë) tńč0łůSÇ`4Q— ™TČ{»é &ř(ü07önŁ‘őďŰ#c†1íśp0čöfOĽĘ„Ńuú(=ĐĹC„ů˝»Ô»żŻŇ§ô%¶Ć‹ČUsĺµFĚőO?MUx9ţ"L•U·ôĽĎ”ˇ»Żc|°çb'·ŁU×XĎnNJRŽ$& Bㄳ:§˙Ľ®.xHÜ––f7OhŔd©D\ˇVkÚŻAŻÓáŔţ}×x#%Óţ–}4Y*!–zB©Rµ_}INĆ‘#Gě6FäpĆůâ›ßuřő˛ń†};yalW_[#H_ZŽ ą©ůe¶2:×í ~ÉhBWo ~Ď+˝ĺçľó|@zAY«{M9R]c=#"˘ćđ¸!BăeË–aŐŞU Hí˝Ů„,­r©Áh€Ń`@A^.cdG8‡9”ľRxŠ\ńŰc˝űŹçAâꂾR@|¶w‡řˇł‡"Wú«RAă„ÉAŰă0uÖdřvëŠ;Uî¸đř“¨ňŔŁĚÄVO9î–UŕÂŰË!¨¨Ŕ˘ţ-ybܦÓ"yĹ&2/Â{ů{P{I0± ńë (?Ź˙÷müvĄă/Gü–$t++Äă˙xŮĹft?¸ űSłĐĎ5s Űë±/·»Č1kâÔęm_­Â~ł+&ôĐ@Ňw†tńBĺ«/ăO_Ü7|ÜŐ˝1¤‹*ž]‚Ł* ¦vëřâđe#/,Ů•ÜJl9}w%ťĆ¬)ł!ß®Ć>C©0kô$xŠQůÎřĹŐ÷ę IČ éâ…˙oďÎĂš:ÓţĂ– € q#HˇăV¬][µu«t±Rśnúľ­ejőšiŐÎL÷˝ż·Vk§vQ»¨í´Š¶N«­Ë(V[ÁĄV­ŕBU\BÂIH€p~PR"„ďçşzUÎňä9÷“rnžĄěÇŮ>pă­©řU‘€´ÁrÔ/x9ŃńHźu;úÄ Ŕ¸0ŕĚs/BJ1oÁ<|*Ăb#ÎĽł ~•xě˙^ĂľŠzL¸¬AÎű[0čâ9,xëşpÓ.žÄţ/"¶J‹/ľŇřą-ČĹ_źFśŐO<‰Ňj âw~Ť}ç´­âľ»îĂżNň‹ů–łÚj\c2ĆOČ`u“¨p BE5ظa=_´i%F:M1öd[pϬ?1 Ä„yźÍ·ÍuËfŚź<Ĺ/‚•÷= »î_‘6E«żÂęéŹyĺuĚĽ?…ßţ«ošŐ¸í‰żbÚ#˙‹vŔ–›îFX] ůÇbLą}:öć—`[ĘPT–aÎëŻ ášřF[‡#©·ŁXw3żř†`łdN$_Ť’3y¸óŰŤh(/Ǧ«®ÇűDă|^.nÇ÷¸üŃĎř$ů.”ËúŔ|d¦¸´l#VÎŁęľţ 3ďĽĺ‘Jś®¨aĂ’G92l ,Ą…¸{óż€úzl’'"XJ Žŕö][P]T„uCÇŁL15'ÄŃa”˝»Nš*I,߇i7_űď˙Ď3¨©o@Ýçë1>m"Š–¬ÇŞ»4~nßyw§ß‰Ók·bő¤Ů€Ěż=…›˙ç~ü¸%_OČ€TÔ€??ľÓĽűäaëŘ»a¬Â˙>˙ ¦Ś‡ďK*±=ův(ËJńŕ˛7‹Żk$8–zÎţv 3Ňţ„Ż ulX""""&Ú¶%őč~:„#wüٶíă±épü8öÜňŔďŰ&ĎĆ„c'±+e†mŰÚ»ćáÚ#ůČMI\ Ć'îĂ5öŽĽ ‹Âş‘· żAŹăCŻśT ¦˘ú:ä8~ŐpX5°ŠĂűD7>¨ OF]Ńq\5ĺ˛>€ďFÝ‚ę3'Ë}0ú7® ü͸(Ë9KU@żh6*yśĽ h¸čAä‡˙ö˝ p4q4ęϝĥ„±(S4.gąmčX=†ü?¦ŁJŇ8|hëŤÓpůŘAż{>ję?·7ĎDŮáĂřię˙Ř^ă“ ÷aâ/yŘuó¬ß·Ýö0FÎĂ›?·&Áź¤=‚‘ů§±wô$@eH8ÖŽąĂĘtŘwu2@5źŐ'#ŞÎ„C†Ú®ˇîr8Bţý-0ôF6*yĄčâôWcoÎp ęj‚źr†Ž±Űf Â⯷ŰV‚ťWlł„Ę;4Ůn[y¸{ĂĺvŰ´QˇŤh·í¤2¶ĺCYtb‹mżÄ_ÓbŰľá€Hd·-÷j>‘g;Ůop‹m' můţŽKj±mo˘ýű»Á?9Ăě?{őb vÄ]g·­6$ąW|ľ+B#°7Áţł¬—÷Ç>ąý’—űÇââő(ë íËĆ$"ŻĄRĹŔ`ŞĂgßěë×[[×µÎGçd8uĽ^ŻGnn.TŠ”U™a®łúô{áńE‹‘ţ0‡t´&952E?Ľ°äĂCpŇH˘îpE˛Z2×YˇÖ‘––ćÔń:ť[¶lJ‚ @>Úy~*‰Číp """ň2&łW:ę-D`OI"ňNL8ye¸ę|´j%“˝ÄőyĽDDDä0jôőPëŚ>}ťSťíßG~:̆ďľŰő@DN©2Ö˘ożËĺQ&Č'$&&âÚënôů„ąÎŠňËŔ±źF~^ß•iµx÷ť⓵źC«+g@zú!ÎT ł„1É) F1 ëÓÆŹ`0zPľşÓď”ĎxŻ2á@DDDäeN_0Ř–˙Űą}Ž9 řŇC^>[·µ *ËJ!“öšk÷÷A& DaaˇGŐË`ŞAHNMí±:”iµŘ¸a=†ĹDxdŰLuÉűaŘđá=úŮ9śłĎccÔ1á@DDDäe¬  4U°6€ď÷dă›o†ĹlfpĽĹlĆÜěܾͶ­č‚ÁnŤŻ ‘`xL$–.]Ę7ÄĚ34j5ÂĄA F+ UU¨Đë#„‘2ZęqüL9Śćz@qQ>\˝ŠC,ĽÔÉü||¶n-r÷ďĐT:UZm“HDä˝""""ďd®ł"ݤ*Eô‘˘ÖbČŹOň&ju vnŰf·âH•±gµŐ0Zę v( Lź>ďŻŮ€Úş„ČĂđ7‘ł68«­Ćń3ĺ8_n¬ÇĆoţ­ľ‚Áń'óňlɆ¦¶ĚWW2Ůŕ$ą\Ž´´4¨uFŰĽ&Dä9ŘĂ|‚^ŻÇĹ Ťěő¦1ďMŚ–zµŐ€¬ov"뛝źz¦Mü#bchvĄTB&“ń ÓĂ´ú ěÝ˙rý‚2?ś/7á|ąÉ6/‘/`¨ i4$&&2Ý ''ßnÝŠá1‘Č9Ąe@ěŮ˙öě˙ JEŚ»qÎź(˘˘0$!Ç#J©d şÁ`€F­F, *U L5f:š‡ĂGóqččďsmś×Šhđp’@Ŕ ŤZŤh•Ši%F–šj”iµĽŻô ”«•řhĺ \š>iiiL8ů$© 0 Óé ęqZ]9˛÷ý«†teeĐ••!w˙~‰ĹP©TP©b­RńAˇ“ †2í%¨KÔ(.:m*©€Ţěg—dhŽÉĎ.A¨¨7¬Çă‹3 ­ÄH§)Ćžl î™ő'„p "Ç´jˇ+!+•(ŐhçˇDÉ€ăÇŹ###ˇW~Ů‚ź‹őč&F”L‚Iă×ŔZ‹ĹEE(.*$]{-ĆOČ€9éč‘#P«KP¦ŐÚMüŘÜ%í%ü\¬g°¨×aÂě„…Épů˛ ęB± čtđe~ŃCĐpľz˝[¶lńnŤDć:«m®?äabȤAI! ôÉ+†®şĂ#6f¤ÁŰůŧO7ÎÎą ě˙ńÔŐŮĎbmPe¬…ˇ¦ĺ—-śĚp Yxc¡A[ •饄ĘƱĎŁŁŚ®H84űň. 
Iů…ÝůLJő×tlÝşR©'ňŻĆä9¬ ´Ufh«ĚŤ÷¦@˙ß&ÜÔăŕ/§mÇ)}«€XŐ„Iüńß_ŽŘ•Ó4ž]ŠŠB”˛Ż×$#,f3ĘĘĘeZ-,µh/]‚ĹbV«…L&Ăłç@«Ż€NWü‚b”éˡŐW ˙T1†©"ŕS-Śćz®0Ń µu PëŚxtŽs=ĂL&4 dŇ@Íő>=“©l=¤BE-Ď«2Ö6&-ję8ˇ#ä.QJĄíA¨ţh6üÄł—CoK8äďĐ8ź‡JĂ€t‘aÇă@n„25´%đS2Ö>›tP"hŇÔçíoL虏o—Ľßę~exăwsťV«ŔˇDDL8PW›•Ő˝]¸ëµl r›ö&H4Zę‘sJŰ"Ičg·­¶®ˇÍrňJ*ZÝW[×ŕÔDŤMóWÔMTŞ ŚŽF©F†Ó?ĂŮ—]ľ{ëŮjż]‰úĽĆI"Ĺb1îp6—Âě÷d̲őt@Uj·®DýŃl&|€P©Eížő¨ű~-©7&9ă'pn""GdŇ@¤\­Dff&q…ŞŞJäîßχé6hJÔ(*8ÉyÎá@í&=_˙{3J5Fę}ú~ăáŮ ŠR1XžţŕSÖ¸>zCĹ%4”Ú/G¦ŠÂwÍ`÷î"–H0yę4Čdá¶ei­§ŹŔzúe Db DŇp „mäŃźł -PoACu%„2µmřĐÔ»iüöl """&š? Ý3ëO(.*žݻqůrăč†Ň˘®ä]ÂÂdHNMĺ‡HNMŰ#°g÷nüZüŰg«Ş Ößşá“÷“ś‚kGŹîňáJz˙¬ ąŃnŰ=ĆŁPYźÍ©lB‹ó8dŽĽ“BˇŔôéÓńţš í®lBDL8ŹÁńńP«KPtú44j5te|ňş_ĚQQV©?dTŞÄĂČd2Üq×]0 (.:ŤÓ……Đ••Áb±08^$,L†(ĄńC†ŕČáĂ8›cë˝â¬ ±ĚžăRĎ#‚P1ýFţžt¸0â9›•Ńg˙~»s †*dć÷ĹJýl8"ň:rąiiixęÍŹ "&ȨT1|P%ę†ÄèkGcÔµŁ /·sű¶ťWk± ŞŞŇĄ„ĘZąCB kcRł+'ެőzÜ3ëO H —áâ…RčőzʍDDD>Bď‚ěKŚtá‰X‚8“O\˙ĉÎ-7š2ćzČĺr‡űdŇ@ ěŻěTúţ5Ôµ[FBBB«I(w”§Oo· ą\ŽëG ďTÚ,Ł®ĆŕÔ—ß¶ĘpGŰx[űşŁmÚ+ĂmÓí«Rq4"_Ŕ„őjµÚĺsÄb1˘”JŹ<ŽN‚Ý\K8D)•XZ h˝˙úĄR)ŇŇŇ:UFbbb§{˘4Mb×Óeđ2ÜŃ­×mĂöíš¶ń”ö%"&¨ťĚĎFŁf šľ—•Ál6wčÜČH„††2 UU Â6nXĎ [ěÍΆX"éđ˝Š|‡2J‰™xaÉ F+†Ťzřc͆o Á„Coz04`0ß‚‘HÔˇs++*PYQÁ 6ÓÇjěŐ×/Ĺď]c;Ňs†Č‘2KK…:†č7X¶lR®V¶9٧/K$P©b|ú;K&“ˇŹ"Š1ň L8ô·X !ELd0šQDŔ,»4s|VV4 úX«!ďĺŘW>l§XÎôę$Őj öŹ„N$ĺ‚ěśęŰásÖ•Ű%TÖJ¨¬îMv ě°Čq›ŠÜŚ ‡^ň ”T«a ®đfŘśęëŇĚńÁÁÁ€Ë¤Őä1d#·1§úA-dĘ;>Sw†é(ëťź\A!1ˇŻŘĄ×¨ŞŞÄvs&Číü"""ß ·1±ż """ŹŔ„ą‡TµÁRgEťŕ‡XŐŁŤIB¸||Ë9ĄĹGo˝„®áőa"""Q ÄÓÇ\[ŤHĄÄc!śç‡¨-Ú*3ŚâžYę±:XĚfhÔjȤŁ(U<ĆOŘcu0 ¨Đë<6F˝DDD˝X"A|@ ADäá´eZlܰĂc"ŚVäź8Ă9ű#„ąDDDDD䕤R)† ‚*c-¬V!ň0L8őbju ŻÂ@‘WR©TX´hňŐ•0Zę"Ă„‘ŹH¬×âŐ$ADDD """"""" R„ŕč‘Ă(((đú0á@DDDDDÔÎC\„¨o˝ą„Áh#FĄ§Ź#ký F·ĂĎ?DaaˇGÔ‡ """aâL5Ç0‘g`ÂČG¨ý#ńa‘ÉĄsTŞĽ~šÁ#"""·cÂČĂ…‡G`Lr Ô:#ŃŠčâ®fŚ<DDDDDä• ™™‰”«•I}úZe2’SSů0Ý•*ń‰C#Ŕx'˝_ • D'ü‚:ĆżÇŢűŚ=ő"š€D.ďáňk l¬äŔ۶ż@ź:Ť /•+‰C®$ŽčA ęhüĹqČ3ţ=ňŢgě©É íň9ŃŃ*—ŽŻŞŞDŽIŠŮDśÜŠC*ĽLtt4@DD­RĹĸ­,…BÁ€Q‡±‡—Y¸p!Ôj5á6l@iii‡Î3f RRRÄNZ¶lcO|ź·!==*•k=:Úwë‰'žh±ÍŐ×&"ňUFËďK[Ěf%’©ÇÉ‚_=>Veee=úúgŐř†ő L8x©TŠÄÄDÂM±ě(…BÁvč!Ś=ő&*•Ęĺ÷»N_]yąËŻĹĎQë,µV@Tß~°ÔÖöXÂA«/÷ÜŐ5Ć(>>ľGëqN]Ú«ß«y%xň±˙EJňőQ© ""ňjµ®^ĺŇ9ááRADDä#š–Ĺ g0¨× Ŕßżý%#ŰšÁeHýŘřwÝ DËźw@ŚŔ?”–›`4×·:Áaó2ÚR[×ĐęD’ţ~"„H:UĐŘ;Ł=V«Đć î(Ł;Ű×mÓVîhgË`ÂÜB­VăĂŐ«đř"ç'€T©bđÖňĺ y˝Xe(ÂC‚Ú=.ç”¶KË —´Ü088rąŤ‘ˇbD„!_]ŮęñŁ2Ţ÷uĆVç5‘`xLd§ĘŕTUĆZä«+»´Śîl߼’ŠN·M[e¸Łmś- """""˘N8±w3Ţ|óMś>}Ú©c[ăŽ2¶lŮ‚­[·Ú%.\ą\ŽĄK—BŁŃ@$áž)©řË_ţâT­ytNŇŇŇî+((Ŕ˛eË:Udff¶[ĆuIðţłE]ZFw¶ďÇË_Fbbb§Ú¦­2ÜŃ6í•‘Ŕ„‘;dddŔd2őx‰¤E˛AĄR.\hK:ś'J©Ä‚ <""¸+ŮĐÄŮŐ+|…Z­FMM €–«>4ß×$::Úc–y$×0á@DDDDDä¤ćÉ qÉÇăÇŹăřńă€k®ąĆéäÉdÂńăǡ×ëmei4đé¤Ă+ŻĽbű÷Ǎø›n¶ýĽnÝ:¨KJ쎿˙‘ľQ ľ˝ DDDDDDNĘĘʲűůرc8věíçśśĽöÚkN••››Ű˘Ľ+÷'%%!))Éçâ83#*U ŔÔ¬GĂĚ{2o±XřćóBśĂ¨+Ój±bĹ ‚ČI mîoę­ŕ “ÉÔćţ>}útj¨†/ůaß>—bKž=|„JĄÂCĎuéłĹŚâ˘"ŹČIóćÍs¸}Ë–-Řşuk‡Ë]ąr%ۆ}{żÇuŁŻµ›ď<{8ů©TЏ¸8‚<{8Q·ylţ|1˝{8Q·‰ŹŹ‡X"a z&|DAAž~ę.ťŁŚRâ±ůó<""ňh÷?đ 'ĐôBRADDÔ‹‰%ÄDd ¨Űˇ"—z9 ŠŤ…T*eđĽ {8Q·Y±b´eZ˘`ÂÜŽ """!•JëŇ9ju ˙ë_<""ňhŻľü /Ă„‘ŹP©TűH&ADDD """"""ę6qC"沽DDDDDDÔm,X€(Ą’čp ""ňz˝ŮŮ»]:G"– nđ`ŹÜŽ """ˇÓé°{×.—ΉR*±`ÁŹ<ÚÓĎ>‡ÄÄDÂË0á@DDDDDDÝfÇŽ0 D/Ŕ„u›í۶ˇŞŞ’čp ""ęĹ věŘÁ@‘Gűaß>čőzÂË0á@DDä# &LčŇ9UU•ŘľmGDDmßŢďˇÓé/Ă„‘ŹËĺxË- y„†€şËcóçC€čpp`Íš5ČÍÍu{ą™™™-¶ăŃGĺ/DDDDDÔ+ÄÇÇŁDSĘ@ôL88pěر6÷;]–\.osMM 4 ]H­V٦¦Ćaě;JŻ×ٰ°Đa{·×ćDD]Ą  Ë–-Ăă‹;}Nxx&OžĚŕ‘Gűă¸qP( „—aÂÁ… béŇĄ¶Ňččh$%%Ůö7˙w{šŽm>ŁęîÝ»me'''câĉ zův·ÜÜ\‡˝`‚±|ůržĽ†L&Ă”©S""ňhănş™ŘóBś4Ň•J…… Úz2h4Čĺr¤ĄĄ!-- *•Ę鲤R)RRRlçęőz»dĂś9sđ.¤P(Úí‘âJ{¶w,o‚DDDDDm{üŻ…Z]Â@ôěáĐĆeóžk×®¤¤¤t¸ĚćsC0ŮĐ=äry‹+łgĎîp;fdd ##ĂösNNŽí˝Ť… 2čDDDDDD`‡6]ŮÓaíÚµČÉÉéPYL6řF;6ç(Ů •Jp"ň*ju ˙ë_""ňhź®[ µZÍ@x&şáa•ÉßhÇćl "O”W_{ť ""źSrîL&áe8¤Â…‡ŐŽ Ż`˛Á7Ú‘É"j‹ÉdBnnn§Vżq$77§Oźn±=99™sĆ˝ůa•ÉßK:0Ů@D­%˛˛˛Ú<¦#÷ G«áŔ©S§°hŃ"žĽŇ[Ë—ŁDSĘ@ôRчUgşĺ3ŮŕíČd9Ł˝Ţ}úôqiIĺ!C†´{sÄd2áĚ™3l"""ňěáĐÁ‡Ő¶ţBÎdo´#“ D䬤¤$Ěž=ŰvźĆÂ… ]Zv·ąć˝L&–.] 
ŤFcű˝Ň|µśćÔj5>\˝ Ź/ZěÂý0o-_ÎF$""ŹöôłĎ!&z áeŘᫎţBÎdo´#“ D䪔”Ěž=PSSĄK—vz6mGÉţ^!""o·cÇ ‚ rĺauéŇĄL6řXŇÉ"ꩤ“ DDä«¶oŰ†ŞŞJ‚ rĺaµ°°_ }(éŔdőTҡ;“ łĹEEl8""ňh\“ >¬2ŮŕSI&¨'’ťI6( L0ŃĄşjË´X±bŤ<Ú§ëÖvz¨"1áŕŐ«÷Üs“ >”t`˛:Ł#I‡ÎölËĺxË- >yźH8D"D˘ŰOť:…äädµzŚ;V'NśčRýČł“L6Qw&8gő“'OFxxÁ„CÇţwěŘŃăߏ=ö8€uëÖőęFnŠyÓţţţčׯRSSńá‡ňSŕ éđÚkŻáŮgźe˛ş%éŔdő&S¦N…L&c p踧žz ‚ tIŮ/˝ô’]ŇB‡ŻuüřqŔÝwßÝę1=YżîÖTŹÚÚZěÝ»!!!;w.^}őU~®ŔDuWŇÁťÉµZŤWŻréśđđLž<™ŤDDDMĂďčL84 Ä/żü‚M›6uIĄ·nÝęÔqfłŮVźîälýzŠżż?ńŢ{ď€í˙DDÔ˝I‡‚‚·öl0™L8sćŚKçČd2L™:• DDDíÁŮs R©&???<ôĐCxöŮgaµZť:çâĹ‹xä‘G0pŕ@ˇ˙ţxđÁqîÜ9Ű1?ţř#D">  q¨@hhh‹aŻĽň D"ŚFŁí¸Ö†r\¸p<𢢢†n¸YYY-ŽűňË/1iŇ$(  22&L°K.8[?W®Ůh4B$!>>EEE6mÂĂáT*‘žžŽŠŠŠ·Stt4´(ٵú^ą˝¶¶K–,ÁČ‘#!“ÉŽë®»ŽĂ4śL:,[¶ŚĂ(¨×Y±bĘ´Z‚ ‡Ž©­­ĹłĎ>‹sçÎá“O>i÷řŞŞ*¤¦¦bóćÍXłf ôz=6nÜ]»v!99eee€±cÇÚ MŐŐŐ-Ę{ć™gZçČĺË—‘ššŠ#GŽ 77çΝàA‘‘Ď>űĚvܻヒ™3gÂßß9990Ť8pŕ––†őë×»T?W®Y,ĘĘĘ0gÎ<÷ÜsP«ŐHOOǦM›đ÷ż˙˝ĂíTPP¸ćšk:tţüůóńä“ObÚ´iĐh4ČËË\.ÇÜąs±lŮ2~şČi[·nEfff›˙ůbҡ “ DDÔ›Ál13L8tŚ 8p ćĎźŹ_|Ń6´ˇ5Ë—/ÇŻżţŠgź}·Ţz+ÂÂÂ0věX<÷Üs¸páŢzë­.ąřwŢygÎśÁóĎ?ŹřřxôéÓoľů&Äb1V®\i;nÓ¦MJĄx÷Ýw‘±XŚÄÄDĽóÎ;{T¸ĘŮk dffbĚ1ÉdxňÉ'ß~ű­ËŻmµZqâÄ Ě›7ÁÁÁxăŤ7:ż¦DËÓO? ™L•J…wŢyˇˇˇŘ¶m?]Ô®¦^6®ŕŘ=ňĄ¤CpppŹ'Ę´Z¬X±‚ŤBDDíÓukŰ]^š9}ú4 &&¦Ĺ'(..î’úť:u ĐŻ_żvŹÍÉÉÁŰożŤ_~ů/^„ŮlFmm- ®®Îĺ×îČ5÷íŰ×öopzŽ Ŕ~hIee%˛łł1ţ||üńÇŘľ};čŇ5,^ĽO=őŇŇŇpë­·búôé¸ăŽ;:ôWkęťärą]"Ť:G*•"..şňr¨îÁŐ§O|’HpózŤMEo*öňĺË‹‹ĂĽyóđâ‹/¶Ř4ţĄÜjµÂjµÂĎď÷Q đ÷÷‡żż?ęëë[} Ge^ąÝŃ1¨ŻŻońşWúꫯžžŽŘŘXĽţúëHNN†\.‡X,¶ý•żyąÎÔĎ•kvćú\i“ćľüňKĚś9łfÍÂ_|áňëmܸË—/Gnn.A€źźŇÓÓ±jŐ*®­KDÔtúr—ju 6mŘ`7”\g2™››ŰˇżÂ'%%őŞL55(Ń”ştÎ[o.ÁO<ÄÄDľŮĽH@Wż@XXžzę)<÷Üs?>D"Q‹ŮĐĐPTUUˇ¦¦!!!¶í555¶ý]!** .\€ŃhDXXX«Ç˝đ hhhŔ'ź|‚qăĆٶwf…žşć+5 áصkW›ÇµÖ“"==ééé¸xń"ľúę+ĽńĆذaüüüđůçźóFDÔÍ_vĎś9°đp¨›IĄRöÜ$ş‚_wĽČĽyóW_}-ö7e©š/Ůüç®Ęb5eĎź?ßćqMs4ÜxăŤvŰ:Ôá×î©kľRÓ*‹Ĺ¶ÍŃpŤK—.µYNż~ý0oŢ<ěÜąđÝwßńÓEDÔÍÔj5>\˝ĘĹß…1xkůrŹ<Úý<Ř«z0áŕâCíóĎ?Ź>řŔáţ;ďĽZ¬lđź˙üpűí·ŰX›x¸­IDATmoĆŕĘüŽ4Mtřý÷ßŰ=đ‹Ĺb»ä‚R©`ߣÁjµâő×_·ýÜ|ȇ3ősőš»Jvv6 55Ő¶­i®ŇŇŇÇ5—žž…B˘f“Ť) pX""""""*.*‚ĹěÚ˛bcąZ™juH…Z­¶uďď+WnHIIA˙ţýqöěŮű§NťŠŐ«Wă•W^R©ÄđáĂqâÄ ĽôŇK¸ęŞ«0mÚ4»ăcbbđ믿â‹/ľŔ°aĂZ}MGŰ›˙ű¶ŰnĂęŐ«ńňË/cĐ AŚŚÄóĎ?ŹÚÚZÜ~űí¶c'Nś5kÖŕŻý+ž|ňITVVâ˙ţď˙ ‹‹łgĎbĺĘ•¸é¦›äTý\˝fgŻĎŮ61™L8zô(ž}öY„„„ŕŃGµíONNĆ—_~‰§ź~Ź?ţ8ŠŠŠđöŰo#$$FŁŃv\ll,ôz=~řaĽüňË Á?˙ůOŔ˝÷ŢۡŐ;¨ă4MÇĎsď”NDDD­Z±bffd@ĄŠqú“ÉÄç Ýf"ČᤑkÖ¬Annn‡^pŐŞĆ®śŹ<ňH‹}ĹĹĹŘ˝{·Ăý5558|ř0JJJ`6›ŚAaôčŃnńĹhßľ}0Ť¶MĚre™ÍëŇZ˝Ş««qđŕAh4444 ""#Fڰ-y 4®BqčĐ!ś={fłˇˇˇ2d’’’PRR‚ýű÷Ăd2!((łgĎvş~Î^skuo+ÖŽŽkÎßߡˇˇŽŽĆČ‘#íć°°X,Řż?4 ¬V+”J%ĆŽ‹ożýŐŐŐx衇lĂ.ňňňPPPÁÉd¸úę«1tčĐËpQ÷x|Ńb§ŹµÍŘ´1 Úv†Îą“« ŤZŤŤÖ3pč™gžiu¸‹Ă„Ă›oľi[¶‘ĽGX »°ţ{Ó*DDDÝÉŐ„Ă[o.aĐ<ÔôéÓ‘––ćp_›«TDGGăž{îa‰Ľ„€Žő.{â‰'<""ęË–-ëĐyéééś8ŇËڱ̈́Cpp0×9%""ň":}9tĺĺ.źÇß÷DDÔ]â†D,qů<•JĹßW^ĆŹ! 
"""""˘î˛`ÁDý¶ ů6&|„^ŻGvön—Α%<Á#""ʦЉá˛^ """ˇÓé°{×.—ΉR*±`ÁŹ<Úłçpţ/Ä„u›+V L«e z&¨ŰÁl13˝DDD˝X™V‹+V0DDäŃ~Ř·z˝žđ2L8ů•J…‡žëŇ9f‹ĹEE y´}{ż‡N§c Ľ DDD>B*•"..Ž """ŹŔQwylţ|1˝{8Q·‰ŹŹ‡X"a z&|DAAž~ę.ťŁŚRâ±ůó<""ňh7 …‚đ2m©¨©©Aaa!ŁDDDä4ŤËç%řAŕď{""ę>"ŕâŠë®żz˝ž+Ux™€öľ¸,]ş”Q"""ňaYYYP—”0DDÔmffd@ĄŠqúxŤFŤÖ3p^Ćá vU!""""""˘ö$%%µşĎa‡9sć`âĉ0™LŚ‘—P«ŐظqŁ‹ç”@]R‚'žx‚$"˘n±lŮ2—Ďٸa=ŇÓÓˇR©@˘R© •J[ÝĐÖ‰DDDä=‘4ęZčĘË;t.‘»L&ŰÜBrąrąĽÓeŠD"D"DGGŰ=äęőz·”OîŔ‘;âý÷ßиÂĸ›n¶íS©T][3*J‰¬¬,ŔýG"– nđ`ŹÜ.%%rą˛T.»­\‹ŮlK6,Z´¨Í9¨g‰A"""ďR^Y‰††ě޵ Ů»wŰí{|Ńb—Ę —…!00°zĺJś={Ö¶oöěŮHIIaŔ‰¨S.\şä¶¤CÉŮł8›Ăd`ÂČ }ţůç(Ó둜’ŠŞŞĘß“áÉd.·L«…Ůb†`mŔńcGńŘcŹńËąĹŃăÇQ]mD”RŮá2ÄAA¤Š†źźęp ""ňBjµK—.ĹUcň”©n-›_ć¨+äää`ý† Hż'ŁCIţ~ň>l)"""/¤R©°páBś).ĆŽíŰÜR¦Ĺlć—9""ę2)))•‘ŤYP¦Őşôűé_ëÖ˘˛\ĎßO^†­EDD䥚'J[뼣š&ŕ:qüżĚQ—ižt0 N˙~ Dbb"če8¤‚ČË™L&řűű㜦 .źĎŮľ‰¨»´»z?y?&ÜP‘Ŕ°QwłX,.'řeŽzR[«W:pgĎüĘßO^Ěí}&E"‘S˙přđaÜ{ď˝HLLDXXÄb1 „űďżyyy ąD,ăb©ĆĄ9jjj “ÉřeŽzD˙ľ}Qc2¶ÓA„űď»—żźppL„6˙sŐK/˝äS‰Š­[·"%%ű÷ďÇ[o˝­V‹ŇŇRüż˙÷˙°mŰ6ÜpĂ 8tčP—˝ľŻĹ“]uŐUNO$)  ׍ĆâĹ‹ůeŽzLŤŃh7‘dó ŚůűÉ»ą}HEW )¸á†přđaŹŞŕěőŹ1yyyرc&Mšd·ď_˙úîż˙~Lž<Ű·oď’zzK<‰Čuí-™i1›!“ɸyڦ%3Ăe2Ě1ŁFŤbP|@Ź~Ë0Ť2d .]şd·ďüůó@tt4D">l{  TVVâ/ů  „   8ţóźˇÓélĺTWWC$!!!FŁ<đär9Äb1D"âăăQTT„iÓ¦!<<JĄéé騨¨°«Ď—_~‰I“&AˇP ‘‘‘0a¶nÝÚˇk?}ú4`ěر-öÍ1|đž}öYčt:H$$%%µZÖő×_‰D‚ .`É’%9r$d2ÂĂĂqÝu×áĂ?´űăŹ?:ڧ3±4Ť¶Űb†)S¦@­VăčŃŁ¸őÖ[[Ťemmm»u$"˘ÎiľzĹOć·H6p5 ""ň4M«WČd2®FáK7 ¸Rlnn®ŕďď/Ü}÷ÝvŰożýv! @8pŕ€ĂrM&“0räHA"‘YYYBeeĄ••% C‡ŚFŁ ‚`±XBż~ý„ąsç ăÇŹŢ}÷]áí·ß2™LHMMrss…ŞŞ*aŢĽyá‘G±˝ÖŠ+”)S„‚‚Ál6 §Nťn˝őV€đĹ_¸|ý‰‰‰áĉí;kÖ,€đÓO?µŘ—źź/î»ď>aîÜąáoű›PUU%”””“&MK—.mµťśŤe]]ť-făĆŤ8`ł1cĆ×\sŤpđŕÁVcéJ‰¨st:ť`6›…‚˘báż…§…cżśžyćáĹ_´ÝۉşJŹ'Ažy怰qăFAá‹/ľĎ?˙|«ĺľńĆá©§ž˛+ëůçźK–,ABPPpçťw őőő-Ę\·nťmŰŮłgÂŔmŰnľůfA*• ĹĹĹvŻuęÔ)€0lŘ0—ŻíÚµ¶DČŞU«˝^ßę±ŮŮŮáŃGm±ďÉ'ź?üđ& mAA*ÜrË-­¶“ł±l~Ţgź}Ö"fW&_ĹŇ•:‘{Ífáx^>“ DDDÔ­şl‡vzUŘý\WW‡äädh4ěŰ·cÇŽĹUW]…ýű÷# Ŕ®Ü¦sÇŚâçź¶ßóóĎ?côčŃ7nöîÝkwîŢ˝{1nܸu---Ĺ€V«đ÷÷G}}}›×Ńtl`` jkkÖł-[¶lÁóĎ?ŹŁGŹÂĎĎŁFŤÂ¤I“pĎ=÷´B‘­V‹ .@"‘Ř^?&&‘‘‘ČËËCź>}PQQďľűS§NuŞťAčP,Ŭi(L˙ţý[ŤĄ+u$""÷ŮĽy3Nś8ÁŮľ‰¨ŰôČ*W Ägź}Á€ë®»FŁź~ú©í!Ö‘S§Nl·=!!pîÜąç >ÜaY}űöµýŰßßßö°Ü\NN222đ‡?ü‘‘‘FPP-aŇiiiřůçźqúôiĽ÷Ţ{‹‹Ă?˙ůOŚ5 ééé¸|ů÷őhçÎť‹ĘĘJ|ůĺ—¶m»víÂůóç‘™™ XĽx±­Ü©S§âÝwß…FŁi·‰ĄŁ9ËŽÖ‘:gňäÉxîąçl ""˘năQ«TLť:Ű·oÇ”)S°m۶6Ë h‘hN,Ăl6·Y'g·őŐWHOOGll,^ýu$''Ű&žlJŠ4ŰŮU:Ş««1wî\¬_ż™™™řŕ:ťDjj*˛łł÷Ţ{/ľţúkś?ááဍ7bůňĺČÍÍ… đóóCzz:V­Z™Lćđ»3–®Ô‘pčtÂáŰożĹôéÓ1tčPśx˙}L07Ţx#, 8<żă__|řřřŻuĺő®^ą&“ rąK–,Áöí۱fÝ:ôë×:ťź|ü1Ţůç?!‘H “É™™‰ŘŘXĚý­Ľ{ď»/ľđ`ěر¶×yhî\üačPlÚ´ ŹÎ›‡ľJ%qäČ@RR’]<”}űâ|i)îĽóNDFFB.—ă…^@´J…ń&ŕÔ˙‹ô™3QYY‰“'OÚĹ®)fżţú+>Z˝ÚaĚ®Ôt~II ôz=ť7ËŢz :ťY°0hĐ × ŔaŰ´÷^t¦Ś+ŰĆŃűůŐfI%G×ë¨}Ďž=Űj]•áŽë˝˛®Î\ŻŁşňzŰ®k{×ŰŃ÷bo»Ţîúěő¶ëíŽĎď­Ľ·ňŢĘ{ ď­Ľ·ňŢĘ{kó:p Í?P;śĂÁd2!//UŐ—í¶+Ł”˙¶$#¨Ő-‡Lš8Ń©LÇ OĂh4"mÚTÍf|´f BCC*U ĘËË1mĘd„„„ŕý•+q2?Ë—-N§CDD$~ĚÍEuu5>x˙=l˙î;\şt AAA2dfĚś‰;î¸ÓV×?$ ěl,‰X‚qcSmu1 ¨ŞŞ´»†?L&ÉdÂ[K—bű¶ďPUUeßľ¸uŇ$dĚš…â˘bĽúňKĐjµ ± hz­đđ»‰Ë´Z-fL&|˝y3rrrPŞŃŔd2!88Ń*F]{-fĚČH[]Ł~ëeQ^^Ž›˙8ř"+ ÁÁÁvmóŻĎ>Ŧ¬Ť8wî,D"úőďŹi·Ý†™3ÓŃ·_?[=ţłs^}ůečt:„GDŕŁO>ÁW›ľÄŢď÷ŕÂ…  B||{Ľ·ňŢĘ{+ď5Ľ·ňŢĘ{+ď­MeäîßíĄ‹řŰßţćZÂĘtzč› ! 
Ď V—`ę¤I=ç°¸Ť†%""""""ę*Î$ü&ďňćo@*•⡹s """"""ňXçp0™L8sć dڍ««Cyąď­x˙ŮąożóŽm9N"""""""Oä°‡Z­ĆG®ft<Äő׎Â-ăÇăÄ/Çńá'źŕÖI“""""""ę1ĂFŚŔ]3f´yLĂäůŽťČcČcČd2 8 Íc8‡ąDDDDDDDävR©±±±Śµ»?VĽóN›Ç8L8¨T*<<÷F:„C*Číp """""""·spĐëőŘ“ťÍčQ‡8Ú¨Óé°{÷.Ś5Š"""""""";É©©P Đć1RADDDDDDDnÇ„ą]€łćçĺád~žÝ¶ˇĂ†cŘđá¶ź÷d¬ĚîôŚY¶—iµř~O¶ËeÜ<~˘”ĘVËPDEaü„‰mÖµy°qĂú×ŘĽ®]u˝WÖµ˝ëuTW^oűumďz;ú^ěm×Űź˝Ţv˝ÝőŮ㽕÷VŢ[yo录÷VŢ[yo录÷Ö®ţě]wÝu:eŠk ‡ÄÄDLź>W‰·m5Xŕow\BB›S^¦…^Ż·;¦yQ}"QY®wąŚˇW'B.—·Z†\.o·®ÍË€¤k®iqÝÝq˝WÖµ˝ëuTW^oűumďz;ú^ěm×Űź˝Ţv˝ÝőŮ㽕÷VŢ[yo录÷VŢ[yo录÷Ö®ţěőď×m ‚ €ČŤ8‡ąDDDDDDDävL8‘Ű1á@DDDDDDDnÇ„ąÝ˙Ů“—4ěIEND®B`‚ceilometer-10.0.0/doc/source/contributor/4-Transformer.png0000666000175100017510000012235613236733243023555 0ustar zuulzuul00000000000000‰PNG  IHDRýbę|đHsBITŰáOŕ IDATx^ěť\“W‡3 +€ěŤ,E¦¨(¨ ([¬ ëÄŠ«­¶ÖݶµÓjŐÖEëÄ˝nEÜŠdČ^˛÷&$ä;!.¦Śěś·ůYxsÇąĎ}!î=÷"›Í&ŕ…@H ) @’‚1â‘@H $Ŕ!€şź$€@H@Z î‘–™Ćq"$€@¨{đ@H $€¤…ęi™i'@H $@‘$őőő•••555𦠵 Ńh$‰H$ĘËË+((ČÉÉIŇxq,H $€@§Ąîihh(((ČĚĚĚÍÍ-***,,,...//g0ížL&úQUUUkĽ455 tuueddÚŻď"$€@@€(.ń{@Ö$7^IIIoŢĽůXâČdE%9ş¬śĽ¬śMV–LˇÂÜPehĚz .FmMmMUmMumuUEiq}ý…«A €LLLLMMÍĚĚ´µµáŽL- $€hF@¤u¬ë€Đ‰‰‰yőęUVVW˘‘É5-]u-]5MťÚJ=Ô•”‰ÄÎ9*ŐTWV”–”ĺĺçć˝)Ę͆;\4JJJÖÖÖ666–––˛˛˛řÄ $€@C@učXÔyöěYDD,ók UF×ČTż§™–~O }><ź€Ňâ‚ÜĚÔśŚÔ¬ÔוĺeśN)+++GGG[[[đâyŹŘ @H $ `˘Ą{JKK>|řŕÁpŮňŠJ¦}lŤĚ-u MÉŢkť¶Xĺe§'ĹĄÄGädB=ýű÷wqq166n« ŢGH $€Dź€¨čž´´´ëׯGFF˛X,š¬™U_s«~:†&Âuµ)/)JŠŤ|ő´´¨ćR__ßÝÝV€`)Hô§-DH $€šľî‰ŠŠĹ“–iéZÚ;™ZőĄREę€;;=%6âQJ|Č2ew7·aÆáćţ8!$€/ÂÔ=qqqçÎť•XÔ1îmm7h¸¶~OQĆWUQýô şÚ:ťîĺĺ5tčP*•sv /$€@H@ô G÷Ŕá¬ăÇŹ'$$€â1ł˛ď?t”ŠŞ†čĂâZXϨ‹~z?ňQ¨4aÂđţî~ś¸ C;‘@H —€ uOUUŐ… îŢ˝ gÔaŤgŔp/U má"čZď z˘Âďľ||BAě???##Ł®5…µ@H Á¨îyúô),óTTT@ôť!ŁĆÁŃtÁ ’˝T–•<ľu)16raŚ1Â××C?óŹ6¶Ś@H ›¤{ ϡC‡^ľ|)#Cŕ:ÚĘa0¤Íę¦é˘S=7+íÎĄ“H]]}öěŮ˝zőŰĐ$€@Hŕ=Ač;ÁÁ+++ L{óšDWQ•Ľ h`±žßżůâa(ěߍ5 ~ ä G„@H@¬ đW÷0™Ě'N€7$Ě2Ę×Ň~XĂú¤ńňâćŮĂ%…yŕëł`ÁXţůd,€@H ŚuOIIÉ®]»ŕ:d–p7]EMS`ŁbG őáÍçĺĺĺçÎť ©ľ„h vŤ@H |L€_şâ 3¬ń8{ŽçGF-QžČÄw.ž`2ëaĂËÓÓOą‹ňdˇmH $ =ř˘{žŘŹB/AJ/ť/żüršJČăĂ@H $ Vx4ůĚ™3 z4u ĆÎ @ŃÓÚ@tróî7Ä-''gëÖ­555­•Á{H $€ đ@÷\˝z|z`{kŚß|şî¶9a]GŰ8şdffţóĎ? 
ŁÍrř@H $ŔÝŐ=áááçÎťSVUźYyÜŢúÄ,9{řZôśśĽwď^ŘüDi| $€ŕ)néž„„„ÁÁ°±+=čČܱy!3YßŘüĹ‹ ;VK!$€@Ľ!ĐuÝS\\ô/Íöśěë=Ľ1G Z!I“üa[ö?~,#Ć!"$€]Ô=proAŠőˇŁ'k‹ĘhÄÄđ‚ňš:ÖÉ>ś••%&VŁ™H $€Äž@uśZOOO·ěçdaç(ö „1şrČŐZß(«««…aö‰@H@ętE÷DDDÜ»wN­»xŚ—:`Ľ°ioG—QGŹĺ]«Ř@H $Đ&NëžŇŇŇC‡QehîăfČä6Ć7:@ źł›®‘)¤q…«ű@H $Đ-ťÓ=pôz˙ţýUUUCFů˘/s·Ŕ7Vçc§AWpô)))é~Ř@H $ĐÎéžGŹĹĹĹ÷˛îÓw`;Ťâ['Ž>ÎăkkkŹ9ŇńZX $€čNčžŠŠŠS§NÁY$݉]č «´E —Ťˇ©ETTÔłgĎÚ*÷‘@H îč„î9yň$ěp9ąů(Đ1Łx÷É7ia¨×Dp™:qâ,üđ¸il $€xG Łş'55|oµôŚ,űáďşŠŞł{YYŮ•+Wxß:¶@H 4 t„¸3ĂRd“wfpĆíH,ÓY¶‡Ćľxęâ⢮ŽńŻ;ËË ”@ZZÚ0Çś@ˇóş3 …âîmxÍŰuŇ=L*%%ĹÜş¬÷ú€ÄÖ>2™2ŘÝçęÉý·kŢĽyb;4\*\ľ|ůĺË—R1T‰¤śśśŻ/ü5‹"źÖ=đ']HH™L8ÜKŠŔc¨Ć˝m´ô{‚wóčŃŁuuu…aö‰:D ˇˇĘý˛ÔşCĄ±čČ-ŞýçpwEĎ:´ đ‘Ŕ§u| ggg[9 >‚M70Ô#äHÍ… "$ âÔ{ĐDÜB4Ż-uőĺŠBźđk†ĹžK—.‘)”~CÜĄŽŕ‡¬oŇK×Đ6srrß;ö@H@˛ |B÷ÄĆĆÂpo›ţŠJĘ’ BtFä&88‹ŽIh @H É đ ÝĂůô%ᨑdŚV,Fa`Ú«‡şÖĂGŹŕX»XŚF"$€íéXéőÎǰ¸ŚG"ě$Ú Ćb2!é˝D @H@T´§{îßż.6ýťEĹXľŮÁ.:áoH·°:V4˘%›ŰôdĄ<Äř(|›sl $€¤‘@›ş‡Éd>~üX®l`j!x0ŚĚK; ó˛ Ű1Ôňôžőßť¦ŕíRŹ ŐÜơ¸¸ÖŰ„dv‹@H@ ´©{"##+++-ěI¤6Ëđ‹GÍómSýţ»Á´ž<ůó…c‡ëFťÝ1Ëďhj=żz˝v-íQWôLC‹@H +65 77x/Űţ‚Y}vŘ˝,AŶĂ{×ü°ńÇw‚–ŚćÓźZÍ`XĹ6¬iäë@Ć&ź­Ř“Xů3 9ű>;#˙ Ű=ÝÇśnojóUpLÎÝusű)ŰęL[{±€IhČ=0Ę8Ż:ő×䱜•$Ý)+gÔµ2>Fć…­ł¸™ŃíŤ ¦,ţ'şL.Ô4uŕ]Wך‰­XŤ·@H Oh]÷@VđWŻ^©k驨j|˘>ĽM¦ëŞ‚]Ygťz]É"•V޲m“ź•;˙ôZ˙őaIşغ؝wzůň­Q Č&#OKwţuK×g†‹B}ZčZź€c¬fĎ·•«Ś^ô˛šH•ĺDiĚص+Ę!`ëö©U‰§}s(ĄůöYőłm~Ó÷‡Ő ^ýߏ_)ľ¸&ŕë‹ĹBP>SK;҇ڱI$€@ŇH uÝźµőőőđą+$$Ťq«żsW!¤_üÖcŚĄę÷1ßţô0˝šł°#k7oĎ™ťgN}5çóŮ+f@*‡ěűĎŠYđ‘“.µÁČăď ľýcŠ>|S˘µtÇňeëżžŞM ĽŠ.Ĺ)C´|Ă*/ŹY?LQ"âNß‚7>ľŞ"¶ťÉ$ĐFü8ÍÇ}ÎŻ‹úŞně|" ácjŮ ă.Ľ51żAH $€şD ő<Ü5 ›.µŮíJDů^óĎ\ó‰¸wĺňŁÇ÷źßż{5^»ýφ-µ•/{´ăĚÔ:¦Ď,KL¨‚·oÍđ°äV€Óăržę˛ďżĚ°Ř[]qqq, ňŁ ¦SěE‚ €»Ň„¬·jjjůÍmáńÇ‚ú¸nüjç#rżŐ÷LŇ+řwéś˙r?¶LmT$ga‡,Cn\ßiü÷ĂŐĐŔnT@ě†zî:QÓ·ß®)ŚŘ´a‘ĺŰäCD9­ž2MÔ7ú&˝_>ľť””Ô»woAő‰ýH,řřř”””M›6}ůĺ—(}$všq`H ´K •}®ôôôŠŠ Ca_o4µ>ëČTď>Ş_‡ľő'¦¨šXé‚ a1Ş˛žĆ§Ťé„YCm{k02J 8»“®7Ďb‹ŔĄ‡]ťô“KËRťăôţ˘(›÷R ŞËäĚť:čkYTEEŮV@}\ŤO_sgOłó Żt6[TTŇţ•Îá㨑r­¬÷Ľ~ý č÷޶ÓpŤşË‘ÇÇyGş´Đ –Ä=˝Í"ЇM`[R%‰Çvś"ĐîüŻB!äe\ązwô´Nx"Ĺě\°¤Čţrďµ:ÉvĘő¦;H }ůčž:ö|Ő·ëKܕ·\Čí˝öäĄŐ°u&řKÇĐbůŔźé‚ď{”Tهę\ľ›«>’:ż8.$€Ú'ĐĘ2Fjj*ÔŃ10nż&˙ŢĄ/ü'dÇ´á}dRĂBĎź¸y7™f5zöߡë˝ôŚ>ŰĽ`6űŐöŤ;"­~˝řÇ";™š{űw?,oę›ÜžmÚ~KF—]ٶëI‰šőĚ˝żů6wśQ°üČnż=^íýnĂ–Pׯ¶Ż4Î6L¦hędddŔÁ®ö…ď!5XkâH}\őé00,€Dhe˝'1)IYU]NAQx%)ZÍ\ŘúÔ°k56ôßźźż˙/ůćo á{×=ĄĄĄ`Ä{uEůćÓ'ă°K±" ­.K“iľ» #@é#VÓĆ"$ĐuM>8kjjŞ««µôMşŢ^jŇ45ž˙ţwü u}DŇϦ!˙ĆáŁĘźÖÁ¤]RŚŕ¨ÝÇ˝ą ŇÔÓ”H˙؉T@é#ÓŚDRO ‰î))á$~ «p>eůwQLćčíŮ|Ňç̧ĺŮ•–~’C!×2 ý÷}k´~NŽ“VNZlŘüÁ7»ĘwźőPfĽÜ>vÉIŻĐĎçś>éK?\ě˛ëÓw-ŮwÍWáőÖďţ -C¬ČUš|x‡ %.Č{Ö±Ź&Ĺ?®J5öw¨ÂńŹ…'ťl|ÜŞ%{Ă-ëš›aĘŻ ÎrňśáÎÎÇĆâ×H€çPúđ)6€Hh˘{¸Ç=hr|÷iˇčN^áôÝ®¶oi• ĺ^­[6ĄŽJ.‰/Ń­aÉ*;éÓ$ş–Şˇ’ą2‰X§ŞL¬¨c×f=ȸ—ŕó D`®«¨±+¨'4P&ŽîÔW™D ÷Y¶ń/BŐíSt Wΰô­tKŽd×ufH­YR|?ńSft¦“vËr•(ęžv!á›<#€Ň‡g(±!$€DŹ@+ë=ÉP!k6w˝•ď·g-ĚĎ–Ôg\ĽŹąîŕqw•Ş+KÜ÷Â-éÝľ‘üî+6Ü'iĆs˝3ť‘ľĘ×’ŢísQ4Çm_ĺŘśÂ.6ű=Ü&Bspüíˇ–OçsoŐ’f4鍷ßp•h«‡y۶†¸Púŕ“€€¤hâ“ÂM~IˇňkżćcDĄˇ‹ľP<ąů~%GŹ0«ň«čf&ŠäúÜ[‡ă+™m‡¦‘ŐÜ»řÚől9Śä ?o|QN5ň?q",âěŰד eýŤšß/fIáż+É®ą’›x5‰Ď2T­őhDY:»¸Á&° #˘ó%ł43!Łşô‰Ŕb°šŞ¦ÓOÓmnĆ'*´hˇ7D"¨Ż'í:Q‹".ééK»Č«!$ ščî'+|Ę Ä`˛ÖŘź'ĐRŠ8źćr¦Sg+đť9qŇ?9“ýíc¶üx±°uíCTńçW–§—y9űyM»¦0ČD±ĄµÄîÔ·Ďswś2˙Zďą“ô(dş^CČJ_źŮC×O1ŁÉŰ-UőS ˙Ěď·§čkSY „†’K˙›ľöU5Ş?Ě"{íÂŔë9Ěv0U\?iF;Ő»ňV]]§¶çşŇÖA@éĎ@’G€ÁߏęęŐ«gĎž7{‰Ž±$ µę¶źßń…ÇvşňŰa›Đlů±‡2ýűᅦ.Ž9rçÎťď,ń;˙€cËď \™ŰŐÔÔV­ZĄ˘˘Â˝żm۶ččč uJL ĽÉŻůig¬§§çřńăĹth6č&ë=¸˘Đ5¨E"7łÇ śŔśHQ6„ŽŔ;Ç‘@<&ĐÄŻ™FŁqšoćĚă…ĐśÂđc!Ă…Đ/»d1Ńą‡‡8±©Ž`˛Ř˙ťJµkkk''§ŽVĂrH Q%ĐJ¬=ýHDnşXL&…‚Y)Dn^$Ű ®č‰Ś/Ń€O dO7Ž H &şGVV$3GHÉT´;̆†*•Ún| đ’Š^ŇĶMtŹŚ ç{]MµČ×h3ůŕhëĂ«ŢYĹ.˝:Ůcʡü¦ľŔyŮ' ě}ˇN »ÎÇDÇݱ×(Ç^^Ţk_ŐtŞ6ß 7°`›‹ńv’ď˝aH€€˘$€$•@“­ΧZQÓ=#7?­cGźU ñݎčéŃhÓŰŐyçéŰPSBqŮsîŔhJ ˙ŃÇť:ť.©O!ŽK¤ č©é@cŕ-&ë=ÜOV‘[ď!P´GÍŃ}ş?Ş‚3ö†â°3 v“úeî4ŃËeŠ«ÓWâß{$UŢđ˝ä.gÁ ŽşŽ<›Ë"0łB×yO÷uýläŕĺŰÂ+iű&Np¶ńyűę7sTMM9[^¶e ޲îrkµ5śe,®*ír#X t„ŠžŽPÂ2H /VÖ{Ş«ő…(]$ÍQ“L~?ó´tĐĄâ»Á™ý–Úss>Ęc39¨wëłËn}2Ź)ˇâFIéË?–Žů¦¨šnűů–Ő3lDIUWrfDQQ4WŁZçŽwĹ‘Šqś5´ Nh˘{ 
4k®(+îT‚(LŇ4ĂfŰŃĺCűŢ?žá°ÜIE9µISČ/ŃúU×<¦łąKVéŽđv çź˙aâÜŽ÷[4鍋;#0;˘aZ!™PôHćĽâ¨hJ ‰îsŞĘĘĘĄ%˘G‰¨äôąŐŻ˙>Ž÷ Éw[fGË8Đ<Źé››&m‘@´eÓ=˙,;‰ˇZ×m´Ĺׇ2ë"Ą{g¤™î¨*2”VÂŢܡE"D@GSÎ\Ď–«™(zDh’Đ$€řI yHřpMKK‡ä‚ĘŇŐŃÁđ™4`Íöű«Ü·ZČ3“šć1eľK¶Az—p”Öp´ĆÉcşăzö<3Cvň… g –~iyLý?î·ţÍáÉßg¬ÜúŤ“|ůóŰIŞv†"´Ř†6[ď177ţüyYU›k\eŠĺ¤Ś„ćJĎ©öó2Ł59€˘GĘ.jÍuŹ––VrrryI‘˛ŞşQ´ňsÎ:ấ ĆÉc:×wć5÷Ďýíż†<¦[FsěmL8čĆPWż1áhcÓŰţËĽŽĐŘ 5ďż~m%Ź)Ugô˛Ëľě\O`Ň,ć-ë%Zş§¤0B+˝OŤäŘx‰Řě 9b@`Ďž=Ož8p$€Z×=vvv {’㢄Ą{’‚ą“>Ť@˘k©*™+“uŞĘÄŠ:vm‹<Łl­ach ¦¦MéíçămJ‰z;­deĂąK[o¶¶H·p5‘‡ŠúVş%G˛ë–|Ů yrłRÁmô%ČX]{˙8Rehtĺjšşrň 4y*U†BĄ’Éo' Öuµ,&ł¶¦Ş¶¦ş¦Ş˘¤´4;;űă§>ŢŔ+\Ôá_tÂu ×{i $€¤–@ëş§WŻ^JJJ)ń/‡ŚňVŕŐDň»Í6N®yFáŢš=a3˘Ă.\Ý5vĆőwOâÎf}ĆÁVr—¶Ő, ˙q.6›@$µš»Ô¦űű\ Vޤ%¦'Çg&Çîá*C“Őëi *GMKGUC›®˘*+§ĐŮ'˛ľž E źŠňł‹ór rłž5^ЬýXYYÁîü űbťmË#$€­ëýúő»}űvNfŞ®¶şÚ@,Ű"Ďč’W·FÚŽłÄҢföŠČ’ $‹Áb3«ÚČ]ÚZËěŠ+ •ýȉĎ2T­őčF#šĺ.m­R‡ď54°2“_'ĹF¦%ÄŔR ÔŁÉÉ›YŮë™iô졮Ő}q kB ™ŕeÚÇ–kWeYIg1)\Ô!+\0­ŕ"íŕŕ`oo//ĎYÝ $€€Th]÷Č| şçő˧"¦{Z楑ÍȡK†¦*™Š_ď3ęa‘˝xa iSŔlĺ…Mr—nőnkrÉt˝†•ľ§¤°†nÜbĆłM.XÔ‰{ń8>ę)×Y˛žY÷ŇÓÜRSĎß>ăŠĘ=Ěáeec.-ĘOOŠK‰ŹŽ‰ŤŤ‰‰9zô(čZHíŢĽŹ@H@ňÁY¤­Q­[·®¨¨xÖňu°ÓVɸ_uŰĎďřÂc;];˝˝ÔćřŮ™É ‘ŹogĄ&ÂÖ™ś˝·m3«ľÚśôgBĽŔ]:962ţĺÓâ‚\0C[GÇÝÍmĐ AT*UVa×bG`۶mŃŃŃAëÄÎr4KŕM~ÍO;c===ÇŹŹL€Ths˝(8;;ź:u*ńU„•Ă`©‚ŇťÁ‚Žb‘ŹÂs»őMĚ­ú9őěeE"‘»Ó,Żę**©Ř ŻĽ¬´ŘŹc^:tčüůó®®®nnnp4ŚWa;H $€D@{şÇÉÉ >ŁźŢ·rp‚Źp´žW&) ?2ĽűŤ±áÜÓ;×J óHdro[G»AĂÔ4uşß.?ZĐŇď ŻA#Ľ_={đęů .Ŕ ľQŁF’‘éľ7?LĆ6‘@H t—@{şÎţŔČ˝{÷`ËĆŔ´ww»’čúpýŢŐ3…ąo@ńŔňł»]YôG,§ č8ĚĂ~°ë«ç_<ĽućĚ™ĐĐP__ßÁwßŐZô‡Ź"$€€´hO÷ Řű¸˙ţËđ;¨{Úz2*ˡ†$ĆD‰ ;Çţ.Łŕ z[…Eó>…*ÓwĐpŘŹ‹ żűâQXpp0Ý©S§‹¦Áh@H ®ř„îŃŃŃ /Ż^˝Ę{“®ĄgÔµ>$·;ćůŁÇ·.ÁŃtmýž.žÔµőÄw°,ŃÁe$$e ż}ÎńýńÇŢ|i4žťn_8h9@H HO§ßňńńˇ>ąsU2Ě«Q”—ťŢq÷ĘipXvóýlĽ˙±=ď±Č+*ązOÂÂÂ~üńǸ¸8^AĂv@H —Ŕ§uOĎž=mmmłR˛3R„k«čôţ:ęŮÉÝ=č·hU/8Í+Q~ß°¶7iŢŠĂ=KËʶnÝzúôé÷YQEgĐ$€@ť%điÝ-Ž;Ľ\޸ˇh:Ű„•ŻgÔÝ8{čÖ…ŁDiÔÄŮ#ÇĎ×` #w8°ŽĺŕřĽ5qÖ 7o~§Öꎩ|ŞkfŮwÂśĄJ=ÔBBBöďߏî>|âŚÍ"$€_ tT÷€ĚWOO¤8ŕ«M"Ö8űáÍI(ŻH7ű iŘŰj‹?śđš8g™ŽˇÉăÇŹ˙ţűďęęę¶Jâ}$€@˘I ş‡L&Ďś9śo_:ÉdÖ‹ćxxk›ÝpűâÉ—Źo›Ë„9ËÔµÄ8<OČĐää}¦/„µź×Ż_oŢĽą˛˛’'Íb#H $€C ş ‚ľÁRy? 
˝(ű„Ř žĐóGă"õ ŚÇÍ^˘¨$y'€‹L¦Śś0ăéś‘±iÓ¦ňňrtŠ] $€ŕ Îéčśőôőa·+#9ž'f#ŃsîdV‡Ł[ŢÓćËĐ0QůÇEOgđďÎÎÎŢ´éŻŠŠ ŃśD´ $€@3ťÖ= eŢÜąT bŘ@j* ĘľséTbĚ ˝žfcüćA f·†ţÝý†¸ĺćć@`Ăšššnµ…•‘@H „@§uXĄ««ëççWSUyýLp‹%;Ú Dh„í--ýž^SçBÎNö-Vť tm;Ŕ%33Üś †XŮŽĆ"$€€4čŠîNÎÎÎC† ]÷®ť•0l1Ďľ ż ŽĚ°˝EEŃó©Ů2Ęע””}ű8çü?UßGH $ L]Ô=`ň´iÓ ŤŚb#E=ą'Ěđ´ď´„{WĎ‚ óh?ôéé Yâ°Ń“aC0""âÜąs¬Ĺ@H …@×u•J]ĐŁGʇ7/€\ŠőĽí´¤0ďćąĂT=xz«ălI$’çä9°BvőęŐđđđŽWÄ’H $€L ëş Ń(KŁAŞÎ1ĎÖΨ«˝zr=1bě4řđ4{wpŢÍkĘç˛rň‡ĘĘĘ÷á ýH $ ©şĄ{ Dp^´hÝpůřžüěLńĹrŇŽö2¸·µřŽB–ÓUTÝĆMŻg2wí Âă]Bśě $€Ú!Đ]ÝM[XX,X°€Ĺ¬ż|ěżÂÜ7ít&˛oĹFŇ08F$€řďu´®ĄĄőÍ7ßŔ9ݤŘČ3űţ.+.Ač™)ŻÓ“â Lz›ZöAóÄÝ$Ąjp¶ \|®]»&îcAű‘@H@bđE÷EEĹeË–Ť5 ‚ źÜýW|¤hťîihh€äŁhxđHÜáâ×Ăl7přú\ż~˝¸¸_}`»H $€:C€_şlU1qâÄ€€*%ěâńë§kkŞ:cËľ~ů´¸ ×˛ź“Ş†6»‘î¦ÉŠ“›w}}=ćí’îGŹ!|Ô=ÜQBhźďż˙öĽ’ă^۵!)FřŽ® ,Öó7!×z—Q"4’hŠi[M]§OźćććJâřpLH $ fř®{€‡ŠŠ ňňóócłÉëĘń˝Âőř‰‹ Ż(-¶vt–SPłé?s‰ŽĂČíŰ·c^Ŕ ˘ýšYö…—š–n×›nZܨ“c_†‹®ˇ1ŻÚěx;ŚĚK»Ůr;"·*ÉkŮzLüúŹ9Ăt„?C7JBÔ3+űŘGŻ_ż† Ýh «"$€č:QůŘĄR©.ŤWZZě†<ţҚ«‡ş–‘ą%ś Ň10&‘É](I3@úôé;Ž—u§ť®Ô­yľmŞßÁ,‚ú€É“­UëRn^ş}vǬtŮŰ7gS»Ň ÖéÓw 螇˘îĂŮC“‘@B@ű\­"ěŮłçĚ™37nܸxńbGGǚʲČGaíÜ»é!G‚žÝ»ž•šŔ¨ëĘćMRL$8š[;´Ú/_oÖg‡ÝË"ôWl;ĽwÍý©Ő 6Uü`ĂĘ‘F°-elňŮŠ=‰Uśť †ś}źÁť‘†ížîcN·7µů*8&çîşąý”í u¦­˝XŔ$4äeśWťúkňX ş˝ˇî”•‡3ZKĘČĽ°uö73ş˝‘Á”Ĺ˙D—5đuĽ­6 a± ’´ŁO«đ&@H ~•őžfăÇgô “ÉLJJЉ‰‰ŤŤ}“š•’Ŕ-©¤˘ŞŞ©Y&”TÔč*=ŕ_9E:l¦´Ĺ«¦ş2;=IŰŔX®ÔVţÝ'ÓuUI„´¬3‡NyřÔUTvXu«żňO¬ő_˙i=ů‡uŹ~ŮqzůrŤţg×ŘQeä9 A‰;˙ş5Ńg†Kđž{ˇk}’Ěű ť=źą#(*xiĐřßő”ĺĚ_Ć®]QkľÜęórSŕńS‹ľ±r žkŇde¬úŮ6żé3MĽ˙÷źcÍąż7® `]Ř5Lđeji÷üŢŤWŻ^Á™>wŤÝ!$€"Ş{ŢĎ  Ř R}ŐÔÔ¤¤¤€ ĘĚĚ|óćMZB Ľ>žEH%'Ż C“/`3‹&+ÇăY,&M¨(/…f}ě„2ë$Ťq«ż;ąpýÍ‹ßz\ü–ÔŁ—ó@×±>3f:Édíćí93‹nŮŻŻaPúąkeßV̲ӂXŹ`k‘˙Ćß'ŞÇÖ^Ľ/«DkéŽĺľôآó3÷羊.dől,C´|Ă*/MŇPú˝+~ÇâNß*ô7ŃúhśUŰÎdh#~ ść®ČŔ¸yé—;ź{{Ş Zř€Ăčž/^ îĘsť"$€€¨ëžŹgHNNÎŞńâެ®®†¬OEŤWEE„‡©ŞŞ‚űµULP9µµŕ6”ohŚŽhla#ś)'Ę÷šćšOÄ˝+—=ľ˙üţÝ«AđÚí6l©­|Ů g¤ćU˛¸Ć1k™ďϸęĂGÄŘ4@˘G@ň×{Ŕ…655UMC‡*óÖąEł@R´šą*x檖}«8M xßĺ~ř»Bű3¦Ľ»­7ńRĹÄwßčĎ1źó »(˘ńž˘ĺĽí!KŢ7Áů‚¨6ĺŁęDšÉÔŐ'§®nRDhßčôĚĎ÷’ tńÚ4v8Ýl×ń\őî,`ďH €äŻ÷Ŕ™/pîŃ6č)`˛Ř][´ôŤá-ŁmŔű‚! 
(OY1Ë\_KW}{AH@Hľî`?˛r‰n´¨7ć\LHCčPú} Đ$€L@ň÷ą˛˛˛€©š¦¶€É  »¦›Yč7]@ŇY¬Ťş‡74»Ý WúlNÄ ŻnłÄ’żŢ“ťť l ©…̆t†zhhggç°ąq‡¤cÔ˘M˘«%ęźä¶Ó&8rýđąöů˛i{<ŕĄŘŐy_Ż>5ěZŤ ý÷Ĺççď˙ËEž÷tşEş˛*Ôě"=z@Î $Ů˝{7$étCXˇKŕŹ–őpĂ«%Ľ€äpÝ+ ,&“®Âů”ĺßEÓÔxţűßńÖőt«Ť©!˙ĆáŁĘźÖˇň(DWáČĐ=fffđśíŠ‹‹ŁRITŠä/@ ď'»ŢË.+Ó<ŇJźO˘ĂH ; ×=eee0CŠJü ML1™ ·góI˙ť3?„FdWFXúEH…\Ë4ôß÷­Ńú9A:NZ9i±ń´1}2Cc_• ضcÍEVVčúE{#«ŘŐušľ›×/¶/:0mĺž„ú·U{âţí*a˙›»űYY=SËýçý3ëúýŁé¨UX”—^ańݦőcĺÂ|§ś˙ňÔ¶ˇňµz­6;zuĽvă§Z}ęŮď7<—˙bč˘L«3×vąnOźÖŞ%«ŚÂţŘŚtľ‰îzOI '-ÇűË{¨Ž§łşźëˇĚxą}ě’“^ˇźĎ9}fÎÇÓĘ.»>}GŃ’}×|^oýîżĐ˘1ÄŠ\ĄÉ‡wŘP₼gKđhRüăŞTcďŔq‡*˙Xxb7ąUKö†[Ö57Ă”_Aťeĺ8»mÜŮéÎă‹uůAĄ?¨b›H éĐ=rrüĆMŃťĽÂ7č»]/l=ŢvEV6”{µnŮ”:*ą$ľD·†%«`î¤O#čZކJćĘ$bťŞ2±˘Ž]›ő "ă^b€Ď/ą®˘Ć® žĐ\p@™8şS_eÜgŮĆżU·OŃ-\M8âAßJ·äHv]gHjÍ’Âäű‰ź2Ł3ť´[–«D«ŞŞÚ-…o ŤJˇˇÇŽ‘ŕ3 ×=ÜOVZăęź/Yłąë­|ż=kaΰ˝”qpń>ćşÇÝUŞ®,qß ·H¤wűFDň»ŻŘpźH¤Ď=`ön2éű§|} éÝ>EsÜö…P®Ů©o6Sąń6š|^Ťw$ź{«–´0Ű_ţ••çĚęľŔĺQŁ(}x›AH@´đÍC4†ÉÍ;-Căűz —¨4tŃŠ'7߯äčfU~ÝĚD‘\ź{ëp|%łí4ٞz{_»ž 2‡‘|áçŤ/Ę©Fţ'N„Eś}űz´¬żń@łâŰáĹ,#éď/üwe0Ů1W`Ż&ńY†ŞµŤ(Kg1ŘVaDt^Łdb–f&dT7€e$‹Á⪤6ç…¦ŰÜŚOThłĄĽAˇPˇäé@Y,"4x¸]hč±c$€řF@*tĘăŔŹ&kŤýy-ĄŁ:äL§ÎV>ŕ;sâ¤r&űŰÇlůńbaëÚ‡¨<âĎŻ,O/óröóšvMa‰bKk‰=Ü7¨oźçî8eţµŢs'éQČt˝†•ľţ>ł#†®źbF“·[4Şę§@˙™ßoOŃצ˛ %—ţ7}í«jUEöÚ…×sÚË€NTqý¤Ľ¤H&S0%;/ň§-”>üክ"$ 4DÉŽ™{öěŮ«WŻNü|™¦®ˇĐóˇăŞŰ~~ÇŰéŞŔ‡ĆÓäž?żÓŐŃţöŰoˇ»čččm۶ŤwÓĂó\‚ßŮ^*«™Č"+ŻÚÝÝ}ňäÉPć f-hťCg›Âň"BŕM~ÍO;c===ÇŹ/"&ˇH@0$Üż‡ ‘ܸ«‚—¨ŕîBŠšUhOK÷ő©˝6ç+Ďëď<Üšä´¨ä”=_CSÔg…Ďźyř@Bť¶ßWI›ĚĹ÷Ż‹–S‰w ÝS_W+a3­0üXČp±“¬¬HĆy{®|Wúü¶;Ň—óĄŹŤĘ8¬üââôzŽ‹YCĺ•ďě` Úő«^ăŻ+Šş1źăR0㏄HPůjďÜ9ÖÚ‚đ ä3Ml Ź H¸îˇŃh0Z>:čâÓÔUőŚ:Aů]uŐD¬×”@lryqCAAÁĐĐđńăÇüÄCR·é3Ʀ±VIöfČ˙˘;ÂÝÖĽ•_Wl›Hné×-㪊ë*–>.F–ť8 ĘKřŃf·Đ`e$ $ÜŻ™{`›ĹlcÁ\&Pl‡Že\U*¶#.ĂźDď;—&''żbĹ MMMaľ:i®ÍBóß^ü=o Eď›ĺ/9?ÝĚü¸_ýŞgş¨·húŤóŞŃUŤďÔ$Ď·]hţ{Ü…?˙ém±ŢUrŮńŰSÎ1GXJĘ»wy˛ÇײPEo!}ŕźóe”×nq t:XN(}<Ě|ˇňâלȧu…g~ŰncŔm|`ŕő»EŤ ´´¤±;łß˘Źţ°ŃŔš]bęýIAîÁo6č›Ŕ·K-Ü‚ŔěśşĚŇk[‚ěű~ÁiÓlÍđµáqŐŤ÷[¶Éą‹@<# ẇ»“‘óŇĚ䣭 ¬Ź]zu˛Ç”CůMĎyϲO@X#ú13oţĎ}ĚŔ>ýí—íŠŕü‚e<üuĚD×~c‡ ˙ń\†ŔşN RÔ=<ű9ćsC‹>fůíĐ8Hd9 áMČ…S:ŢWC–| «@¬’‹˙Y{‡ľěżďâî˙đř'Âé`Ď_R8?DD˛,…täČv˛ŰőČťőŃ«–ËĽ\x-ŞŽĐP1{öů‡ć>WB×'Ý^˝íÔęż—?•_|~õ©t‚’ăĄç›36™+6TÝřnĂÄCľ[űř—¨˝î:wN»Nż?K--iě.ů虓IĎâ·ç\ă´ű„ÝWl>‹IÜ‘wÜ™qéDŔŮR»öáĎzţ™d´ ňÁOO¶:2Žîş*şÔTË6; !$ĐQ®{9žµ5]Ty-)µ IDATĄŘÁr#7?­'Gź˝µ¦ˇčéŃhÓŁÔ›g‡ě`s-‹±Ëď~»)fâö{qW®üʏoÍ­ü†ÚČß˙|ęąéZÄąSe›Wß+‰MżÚFÝ;&-Ç€wDŤ€h‰¤Ż­©·üë»Áîý z*!ş÷¦µQWç5ÂĐÂXgŕhŹ˙ —Éľ“€»`*N[-Ťd‰Uc?}Bf«r6#/=®Nnč”®š¦ć&—ÎtańW˝edĺ”d M ]I^YŽÔPµţx™ńs·ř™÷1P·ćöď/–”čĐ1áÓÜnwšÎ?ÖS‹FŃ4ČKťP!3hýT}e2Ył˙€1šěřđ‚Ş˘WëöjNźp±µ]O-Ç1ľ‡×ž 9–Ĺ>-Úµ'íAâM@:tOµhčE{ÔݧűŁ*8MCqŘ™»Iý2?wščĺ2ĹŐé«ńďůUŢđ˝ä.G@žQבgsYfVč:ďéľ®źŤĽ|[xE#mßÄ Î6>o_ýćoŽ–ĽőŕŢĎŤd$%cCůÚ Fí›»ŹÇxŔu×ŃúѡŻ9M ýŞ«ĺA§Ó…n Đ>Ń=o핱°0ďO¤(ÓJ/oŮiďôµ˛ÉbŁžW„şÚşĆÍ(¸ä{ëż=ÓI”ďYçę+lš‘­Ź^ͱy›>ű=ěTx~Iś…‰ĄZs˘šÔ×1 ˛ŽÎšďz#ö°ěeB({_Ĺmľ‰%ŤÝ)őŇÓä6C–Q“'(ľű–$Ł*O¨­¬ŻI‹ɤň2|÷@Ňu˛6%d_Ixű  e›oG‚˙CH Űš˙w»AŃj€»ŢSSŐ(4Dŕ"iŽšdňű™§ĄF(ß Îě·Ôž›óQúŇÉA˝[·“]vë“éK95ĎH1óoü}OwÖ6B^VąĽ{ΊQVU“P›᯲T7ή÷´>Ő"sWdEç9§S?¬“VĄ¬·uK]ßß cˇ¬$S˙pÍŻÓ˘?p¤PIď]ź?|A7ßry•ő?×ţ;~bň? Y-ßĹS¶-łÖoúK‘U iä@Ż|đť&É)€»su»AÝÄ’Ć>ÉÔžÖPŤLiŢ;łş˛ŠPaƲfŮ*ąµmµ)2" $,ÁYí„ř,ç™§2™ GIäřź­RôI¸îQSS9¨(-•™ i šałíčňˇ}ďĎpXśÚ$}黿P[\×ôĄÜJŚÜKË—ţC_vh¶Ą.î˝ŰÚj–ŕ«E‚»Áťîě®Wě©3DYô4Žăf¨Ž{x$SfěţŮ«F6ĘvŐ“šy˛QÔMý¸xŃŹ eéÉç]ükŰ$µźďű+}̉¬›ĺuE\OčĆ7@ ÁrĄ˛*ŤL¨ifÉÇŰůš¬@W$Đ\7ąˇ/,>˝żňÚJdBy×Úl§;|K" TVVćĺĺ6^EEEĄĄĄp§˘˘ţ办VG Ňžh:, 4^đKX]]ţŐĐĐ€ó  ŤZ­%a7%\÷€¶•——//+™i#*9}nő뿏ă˝BňÝ–ŮŃ24O_úÁÔ·żŘŮ\-Ô"ohËôĄ{ţYfYv9pIú—G~rR'ŇĂPą*«IPˇ°« óÚę"2§˘”3#đó&2ó‚†4! 
ň˘§‰µ µŐŐšŽ*…űĂĚŽÜŢ@Pý„ÎŻL‹ż#ëćŮSŤLR62źőµ_ŇĄź¶<.¬mŞ{äŚ-l(źÜÉŻu6lüáaGÇ'ÔVXĘ“Ą]{ndŤzŰQď&çŇĚzis…»¦4±XVdʤĹë!¬Ő ’’’¤¤¤¬¬¬ĚĆ«Ľś+‘?”¤ÉĘÉĘ+ôĐÔĄŃd©2ś.TRŁ”Ă=ě†6»ˇ®¶¦¶şŞĽ˛*?ż€Ĺj’şLˇčęččëëĂŮ…ž={Q(’©$sT?2 dóň Zy„t „Ϥk¶ďŘ_ĺľŐBž™Ô4})óÝÚ é]žQZcžŃ^'}éŽëŮóĚ ŮÉ6ś5XúĄ=¤/őo2 fĆî˙m&/=Ć=đ–Ś®«Kí˙.dĚ^nT|=$ŰaŞ…H„a+/-ăšéž´7U÷ž iZ¤®[]MYSÖă˙‰—虓3îcCz~úß'óľ·VĘŚŢđó}u%ÂÝô'©–m>c¦?ZĽŕeß3k¤K«Mą{ý@:Ą_ŕ{?ž·ŹIĂćűi*#víűŞ÷Ś@'•úÄg߬Ť'žľ¨7•Đ^»öž(’šőşŮęΛţ]®5-pą0íđďбą|oŽ—HüaŇžńřž ÇÄÄ$&&‚âEť·]‰Ę*ަ}lUÔ4é*ŞJ*j𢌱2‰ÔąŐĐ@đ'(ü6†ËJŠŠ róň˛AQ=zô:˘R© ~ĚÍÍ{őę˙J’’|ÝŁŁŁYUQ¦@WäóÚv_ŠV~ΙăC'\·”!Đ8éKçúÎĽfbâţążý׾tËhNŐĆ<Łţg uőóŚ6¦/˝íżĚëŤÍPóţë×VŇ—2˛.ě O*Jóľý4AÖńÚ~q©Í×ß ýüĎ ‚šÓ7ű+7ó(hŰL~ľSRݤ¤ô~§Öä ·ńĄđâg·Řö2qË7}©pv¨é%v˘Ě'ë Ú÷gňÔ: ČY-űiŃfĂgůcO-ńÚV{yJ[ł®â2íć™Űť7×Âź şĆľß-ÝÄ6KIIüwQQQoŢĽá\^‘BGŰŔDS×@MS‡»śÓM&°>DÓÖS×Öű¨vyIqa^vnVZnfJrr2H®Ë—/CDKKKkkk[[[ 8Ź"áyIa:!/)d'3mľˇ©E7ź¬ÎLfýî k,z÷^ľ|9·Ař9Ź‹‹«©ázKđ¤l¤=đC‘‘‘şG|T>ş:.z0/i{|Ĺá=ĚK*‚ł˘?yňäéÓ§°ĄćQ¨2Ć˝ Íűč™)« Á+ ľž‘—•ž™ň:=1¶¤ă, ˇ€űôéăččhoo/ľ‰†$˝GOŹ#f‹ósP÷ČĎyIAě4sç…k¸ÚÁ"bž4Á]Çn6ŇŽ‹i@„cD#PUUőđáĂű÷ďçććB§ŕŁcĺ0ظ—µ®‘)řÜĚŚ–Q©2úĆćđrró./-ő“÷266¶Ţ>Ü·oߡC‡Â.XËŠ"~GL†^6?;S0Ýa/ź$PĂ™ á‡ýý¤ˇŇTEŹ4Í6ŽUT¤¦¦Ţľ}űůóçőőő°uŐËĆÁÜşčŚÎzę`ny¬{™ççyţUÓP]PZ-Ż˘®@˘őő” y°˙/Î Ą®]ő©a×jy[ňňÄyk¦.9ubĽ^ł€]›|ě·ąO†];÷Ł'ţŻŻĘň2Ř'îׯŻĆöşBEOW¨a$Đ%/_ľ3§ó%ÉŃeř(żĹĘLI žćť7kđŠĹćŔG`Ů#$äbMM5řÁ¸xއĄ'ĆOß°»Ű^/Üú÷ßa˙łĎ>ÓŇŇ‘áK‹î±łłÝ“÷R0şÂ;);éÓ$ş–Şˇ’ą2‰X§ŞL¬¨c×¶H/ĘÖ6†¶`j`Ú„‘Ţ~>ަ”¨·ĎYٰEĘŇÖ›­m Ň-\M8.EúVş%G˛ë–ź\ZddĎ^ś\[šÁňžĽ_^{Üż›—X·ş&Ä«‡535QW sÁ«±ť®€<·000··şk!ö @$žŔöăî=ĹÂnż·Ú·GđďR(Ôţ.ŁzŰ:>¸~>>>zýúőcÇŽuwwU$xcšő(-şô¦‚‚Br|”“»÷Ç™śů7f—H~7Ďśä[-ҋ½5{ÂfD‡]¸şkěŚë˙îžÄ5Ş>ă`+)KŰj’®sS{A2F"©Ő”Ą6M5ŤŚá¬ŁGgĘ/OúşxËÎúx“c_ÂŘú÷ďĎ?ěŘrGŚ7~‰Â/ ŽX‹e€€ež›7o^¸pBňô¶í?dÔ8đü#űyk*ś%ňśěεw.ť<}útDDÄěŮł!yo{élkҢ{Čd2—|đŕAŢ› -=ŁÎbâ]yŮéE—ô¸ş5Ň6pÜ%–5łWD–L X ›YŐFĘŇÖŚaWÄ\I¨ôčGN|–ˇj­G7ŃŁžŮ‹’6ĚV^Ř$eéVX«jý"ÓőBVúţť’Âşq‹Ů'7ą>´˘4úTĐ»ďńżż7˝(ł vҸ›Fٶ]^aßéóřđçNQ^v^VZAŢ›˘Ľśâ‚óCţĆš&H~řĐ…ŕ¤Dé}ś®ęň (ƬŻg±š'{„°1 y-`SB1AŠ`G­cŔ»H !€cę°·I'ŔµĹĹs‚4/ó´śřUďę=Ő´Ź]XȱŁGŹĆÇÇĎš5‹›‘şea~ß‘üĽ¤ď Âa®µk×–—WĚZľŽ'Élů=7ożę¶źßń…Çvş*tĽŻKĺçŔuđŕőzrűĘĆŤgÎś9pŕ@ľöزq)Ň=0řÁ‡\Ľýěľ„é…áÇB†·ś\>Ţ)/)Š}ń8ńU¬Ę@7°ŠâZ÷˘Éuzk¬#†Â~9Ľzš[AaŘËÎHNOŚËHŽ oĽ ř^ż~ö k1íWG`b$€xB ++kçÎť………fVö®ŢS`˝‡'ÍJv#đǰýŕ:7ÎÜ»w/ś-ť8q˘ WîĄK÷@Ę4Çţý!’Ň›´$˝žf’ýlńctज˙*6âaVjŔ†+= Á¸tŤL`Ą‡=¶Ú&Dy70é /ařArŕäŘHpŢ‚ Ü€†6&Á ˇ­Žo"$ ¦bbb‚‚‚ Ć cě» 椰˛ji6¤ťźřů˛«'÷Áń·üüüůóçĂé––ĹřqGşt„ř {^<ş…ş§SĎS}=#>ňIÔ“»°Ňj]ßÄÜŇŢɸ·‰DîT; h?@˘CrŹCö Y9…Ń~sĄ$ 3źŕ7§ç˙;—OĹ˝˙ăŹ?–-[&€\îä~řOăŮf! lT•—ö˛­¤!˘F )Ŕ ĘőÓ`¶–śÝGŽźaba+Ëž ö×Á«Ä]EŽĹĹĆ@śnX†“_čřĚÂÜFžĂuyŘ&6%HUĚ;Ď ĚĚ̸i›ٵ¸÷uţüyXQ†µŠ±3Ô4ńG »ó ={q7c٠Ɖ­­-żW}¤n˝ŕB +++Řš…“Ap쨻“&ˇőłÓ“!ľxaŢ»Đč(»ĂŢÇÚý“ÉH!Ň!\SÄý›”®ŻŻ/8¶wö|™č-DH@``™çęŐ«=Ô5ÇÎWTXżß‘ăP99…{×ĎmÚ´iĺĘ•|Mć%Ťë=đSř ,.ČícÚů›ˇ]ěž×šŞĘŰ—N<ş!s¬{NžŃ6a˝GěGCGßş˙ •š‘’ř"""66~Đé§űS‰ë=Ýg(Üp˝§ üĎś9;\=4´ĆÎXŚ˘§ ŰŻ˘©g(Ż ř:&V} <Díoż|—ß•RÝŁ˘˘«ôŻăbTT5Ô4…ś+¤Ë“ÇŹŠ ŃĎŻß›źťľ2“ýaŐDÜOfBThŘů‚cöµŐŐIŻcď?xűw°ć'Čc“ü)ᶉşG¸ü»ß;ęžÎ2Ľt鬫jhűrDş v–_‡ĘÓ­]éő«/## ŻQ‡Şu˛ŕÎwŇ0ľ?~<śš »ü>¸0ß»íj««®žŘzţHCkŘčÉľłC.Ń6ąÖÁg#ĆúůL[A€.^Ľřëożĺäät˘>EH@Š €› řôŔyđé‘Sŕ×:„ţ0tKűA.ă!Ó٦MUTTđ‰”®÷J®Śz 9ˇ M-řWŚÚĚJM9ňoAN& đţlAă! 
ÜţWDKű t“^Ç<|řĽç„%]ŚڶLĹőž¶ČË}\ďéřLAdÔ**©€OŹ"]ąă±d×pŹČ%ĹEAlÇĎó„ŚŇ»Ţó1jÔ(]]]ß Ű:]›‰¨Ĺ~v÷úĹ#˙ÖŐV5nĚ´y°""ăj}Ü:cüćQ¨´#GŽěŢ˝2Ă·^ď"$ ő’’’Ă!ÖŃ~ó É Ôóps×ĚôôtČož ĽíUŞul†ä 6;,äřÇ9Ăy‹X”[ WŽď{z÷,„Lü|ąí‰\ći9°¬5eÁ—°¬őôéSÎ^-Ëŕ$€¤śüfŘąs|FŔńôđĂŕě1ηGGGCÎWŢv-ŐşPB:'7778ŘőřÖEŢ’ýÖ Őů™}§%ĆšőŃ#m?ŐSĚűł…v†żÉÎţý÷ßEĘĐB$€F ¦¦f×®]••Ă˝§j ¬_ěKrAÄ8ř` ˝{÷î˙Ű»°¦Î.ś°÷Ţ K\€ Š Ąî·¨ŐşW«Ö[Wm˙ÚZ÷ÖŠVÜqď-(*{‹ Ů3lňźDF dÜÎ},…{żqÎűÝäľ÷|g–¶Î{Jpp653 zšś-Bd >¸ň\ôÝ™—ťáě>hř¤9ŠJbq›'8ŇŐŰs”§×´˛ňňíŰ·í‡ŕŁx" ŘlöŃŁGSSSˇ‚&f¸• ćŤg·„ᓿčąÓ§ĎÄÇÇ7nв3Č{(°Ű5wÎy:B™Š-Ă‘\˝R>Ä^9ľ¸úŹđvőŢĆSůŮ9:Ťš:>`°‘ /äZJ”@ÄŔť;wŢżân‡‹c|S@Ô44‡Śź e°:TTT$`/ţÍ÷pđďćI“&AľľŰŽW±Xü!#űŐ¤¸Čëgł)”a“ć@Ä ŮŐ‰üŕĘăAĽĆŮłgˇ°—HĆÄAD€¤€/3äeV×Ôä5µŤ¸<yĄ`“Ńuŕl˙÷ßÁ×zQ‘÷Ô`Ř·o_www¨\ńäÖĄÖĂJŘŔ›çĆąŁPCš0zżî2iéŚńY¬©­{ůňe® † bE ¸¸řŕˇC`˙jśTë\8¸€tuígŐˇ3T—‚"!váÓ yĎp¦L™bii •8CEéBĹ} _¦[çŹŃéň٦Î3±´‘đěÄź^ďĆř,Ę;زŃ_`”@Dޤ·(ČĎw8ŇĐÔBäă€-E€ ‰gá+Ú˙Úµäää–RÓyĎ!9ŇÂ… µµµźßőOŚ o%˛Dëž–”póś/×ŇcŐX›XH‘>rę°ú€•2´6Ń O#€l"Á Ż_ż† ]]!©B c{ŚšĚb±`·«˛˛˛5’!漢žĄK—*)*ŢątâSRBk%T_Ôż~滪jŘÄŮÉiŔŤâŰ!yăéÓ§Á±‘cĽŠ 2@~~ţ©S§¸ĎWtë!ŕ˛rřhĎľź>}şrĺJkÄCŢÓ=SSÓ P)ěgŽČFçŇ’˘›çŽV”—{ŽťffŐľˇÂřw# ‹#äf…/ČćüáÇF×ń"€Č đŞÎ=îÇŔ~Š Ş'*ązŚĐÖ7„Ŕ[HĺÜb…÷đ€ÎŢŢ~ŢĽyÁ9ŕÔá¬ô-Čs ÂÓ€ŔäfC4¦µ}ň.eI!Ydh›ęž={ Ž@ĘŇŕô" fBBBŢľ} o†ö]{Šy*ľĺĐčô#&V±Ů'Nśhqý ä=Ľ K—._=»˘ĽÔ˙ÄţŚ”DŢŤČpöÉ­‹©I»ąBö-2ČK Á¦ Ë  đÁ™L&$CQD@¤@‘>Řá‚gjżaăE:0&zŚĚÚAy餤¤‡¶ltä=Mâe`ÁęŐÚˇPyjb\“í|!ę}`DđK}cóľĂĆXLâŠćčŇŰľ[Ď„„„sçÎWJ” @Z‡„pćććşôńÔÔŃkÝHŘ[¸yŚPVU/ź‚‚–¤FŢĂo‘śśś.\@aWś:ňš_Sâ]On\TRQ:a&ŤF'ž€ä¨ßĐńúĆfđbQäĄDaČÎÎľ˙>0žn˝<„é‡mĄ†€˘˛ŠŰŔ‘eee-spFŢÓĚĘuîÜůŰożUVVľďúő“Ű-»™ĸ n=w/ůÁîĚ ŃSÔĐGŻ‹¦ďÁă|ýüüĐѧ@bWD€ @Ň řŞt8ß şBĽÄęĐĄ»ž‘é‹/ †ŻëüÎ!ďá‡÷šŤŤÍš5k ôőÝ‚Ľ•ĺÍ÷‘v‹Wodg¤vęáµÖĄ- éç‡đ.÷!cKJJ NˇH˛¤“T`űÍ›7ÝĂ>ȵ¤P»× QŕÚ|ţüya%GŢ#b?üđCBTč…wäçd ÔMJŤŔ‘ůýËGÚz†˝Ť”’˛6­}×6Ý˘ŁŁź@FŚ=`Ăďí9ş c@VŐͬě,m;FDDDEE ĄňAáRUU…”†Ă† Ňsîđ6đ´§dŰUU±ś…ý¸#'BI ÉN.Ëłő:Šő\ĽĺŹx† IDATx’›É˛ž¨"Đf©>¬ě±$I×ÜŐc8…J J~ä=BŔ%''çĺĺµhŃ"Hčüčú9(ű%Ü…č/‘¦oź?ČÉLsěîÁ~™°­L˘¬˘Öű«ŃĄĄĄÜ¬­čŚz"2Ť„qÁSÓĄßW2­Ą,+§khbŐŢ‘Ë_×yŹŕXŐ´„Ô>ëÖýęččř!:ôôţ-1ˇo„BlŠoźÝ nŔ‚ń5ş¸€e’› kVµ 8"€´ŘŘXŘąngë odÖÚ±°żôčŢw0W…ř@Ţ#0TujhhŔž×´iÓ¨”Ş{WN^;y ?Ż\ݬ¬€ŔyĹ–(†}šA€ ™ Áěwöě٧ mfĽŚ AŕÎť;0Ow4öHmńMQ]@^‚‹ş€ł 暴ÍŔ™Ľ_ż~ëׯďÚµkrBôŮC÷ň˛Ň†í$řwVZJtČóöťť%8mŰšJGߍٓDN>{ö¬miŽÚ"2„@ff&¦0±´´®2¤VUĄ«[Đ’0 ¨?ňâÝLKK Ü}–-[¦§§÷ţĺĂ“{˙xűⳲ‚wk1ź |xܙݿ•„ĹŠtĎţCˇb3xŇUVVŠu"@Ä„<#!'E×~b‡•$Ŕ_Áęüö­€é›‘÷`uŔ×ç×_őöö¦ËQ_Ţ»ć·ç÷÷ŻK8ÍKŠŹ˛°±‡D"P ‡hHÝąG_ČařôéÓ¦[áD (‹ëůóç ąťť#AED±„D KĎ~PJ\ŔŠ]Č{„D·‰ćt:ÝÓÓó÷ß3f ›Ĺz~çĘ;7˝ĽPRTŘDźz ą¤)=úń¸8/Ŕ¬Ş¨¤|ăĆ 4ůđ‚Ď!„F (( ‘:ş¸»ˇEáFŔÎŃIEUŢEY,Vłť÷4 ‘ ‡ţÇżŹ7NIIńíóű˙íú ÂÝÁ#Ö<żŮé©ŕcdnÓÁŔÄBq±iKŇÓ©{°©ľ|IĐ4N-Ő ű!˛Ź8çŃh4ô”Ą•–ă,¨KaaaXXXłz!ďi"ˇ@1Ż!C†üľyó¬Ył,-, Ü=ŕÔˇ»~ÇçĚOIB'@‡ŔÇĐŞ«+Ç· É ĐągH yďŢ=±2ZÉ肳 m”””Ź?¶kďąÚŽÖmASűn=AMA"N°L·¸îŘůęU}ŔÇ Śo`YÇgř§ˇĄcaç`ic®Xty…ÖO_RÄ  ^]ćÖZ?Ž 𥠯o_B–tpđ°6Cé"ž= €}WÎ3YBJ3šZ‚˝‡Á`¨««óQ ypDsÉĚĚlňäÉ'NŚŚŚ„xďŢ˝ z ˙ Đ7lKAVec kcseŐľ|D‡AőőÎÝűF\E`:÷ě ĽçŃŁGČ{Ć "ŇD¬łđ Ş˘¦^Ň”çP¤=#őăëׯ=<<řĚ€Ľ‡8˘ĽÉîŕéÇôéÓ“ skZR¸ÁLŕ“Ąch¬ŁgŐżŐµt4´tÁ˘ ¤˘BĄňŰ‹,/+ {ý ˛Ú8tĄ¸„‹ťsv¶Óś÷F‹ü˙é $}!—ʎY»ĐĐPŘT†T–Ň%@ľ@Žfř´vęÍüľWůމ‹€µ}ç'·.#ď!Ö"˛®>FŹ Ń@>|ŹŹOJJ‚TxźăRb«¤¬˘ ¤ 'Á—–{©˛˘ŠŹ2+*ĘJ‹ąYśÝ¤’ ą"9ŕđć-ţÓK@4Ă.CĆŻţsvă¶Ă¦şą>HIËůСC‰uźˇ4"Đ(2ç¬í»4ş‚'deUu 븸8ţď˘mç EÄE•——o_}p…«¨¨€,˘YYY9ŐlRĂO¨… ćŮâ‚\n3S ŃÔ4ŐŐLŤą-í»şJA˝Ň7»'Mţ/…˘×ÓŰ»“NyÂÝ€‡—öú|Tzxw†U[©f6x˝ DŢ#…;§D„AľEÁ&–ÖÂôödBŔşCçÔÄ8đ'‚ MÉŤĽ§)d¤p^AAśŕpnHT°zőjŘ34•BŞőĘOž¤P(fßîöŰŘޱ F˙µčż$#ů’ 6E.ďŮÖßÖďyť[EÓďčőó†ßľ¶SĄVĄťîş,˛ĂŻ˙Lz÷ĎźWSŞÚ Zwze»łkWlÎV±÷9´sýHÝěc3z.‰°Xřó¸ÇŢL.Q·›đ×ßLłhTp¬"ůęľµż]}™[©e7âű_ţXÜYSҶk0łYÚvŚŹ|ź––fll,ŕÂa3D<`VĎĎĎďč䊛\’_b3ZŮwyrű2!áĂ{$ýśňma"đMGđě‘Bö-šş‰Ü>)Ośţ©Eˇjş|ď·}÷ÖÉŽŞěĚ kgmzg2~ýŽEžÔČ +VěâTŽ!(vß?÷MFMď«Z™xoí¨…Âg~ÓEą(ęř˛ďK¨ňJ2ž´ËÂ{&ŮÇž_đÉf-y˝{ň4ßĺ˝×Ú°Ň=÷ÚO W_Ë­’²Ű8tYÁc] să”" 0z m-l: Ü’Uu ]ăčč&łáCŁVä=ä[×Z‰ßżżŰJÉŁYNßkÍĎžZ”Ź×~2ÂAg ç7xţ±„ 
2)uť{äâľ‹çWÍţzć·ÓM(”OO_çr˛hV§G­˛śő÷˙ćýřçDŽa+ĎpŮŢË7­ždDˇd……fâ´‘s[±ĺűaC|–®źĂ‘îĂ…şGqđî‹ÉĹ—Nĺ9ű÷Ý(ĹwöJřXÚÚC"îZÔ˙@"!Ľ‡*'gfeG$ˇPŃ#)]**ĘÁ˧©ˇ‘÷4… ÎCPěUë ş/&b•¨*íżąx+đáßľ?ĚM3íńÍ«÷íżóMUYĄŕŮ­óş÷¶TwúĎ'YĆä˘ęCß©˝ŤBÓ2Ö‚?Śím5ä(t mřŁ˘öȸmş´ÓŕÍmÜIţ—ť]Ż(ł 6¦B)ż?}‘»c×Íď(ÖÇČt)T„…$L¦íl“““Á“Ž+;ţD˘!…) ‚ň»@Qa˘ÉFy `v–…şSĎ5e‘¨…b[s\fa?¤©ţčßÓ2D?%ąąą:OŞ%f¨ F.fĂ?`6Ů š(ęôçŁ<ţ^µďÍyÍßG&f\6űPz]8iň4ÎźrrĂMVmß©ţů娪bW3 vU%×NÔp'ŻÚ*¤:pë–5ž?TeĂv"ČYO Áţ€r°ă"ĂĂĂ!MĄ`=°"€H`‡Hs«š ‰ÎÝŇÉ0`¶eČAJ<°ÁGEE5Őí=M!Côó\2 O\) Z™rrŇČŽ:CVß+ŕzŐĐu¬M€Đ°*ŠS‚˘ŕśÍ8ź~]:čW$ĺl!]o˛^GäŔî,»$îet7tĐ«"F×´kŻJˇ”(ŰöčăâębL+cÉ«©)Iç~6Ż^ŕ=RZ ś@šA2†@ x"6ÓŽ8—«f]fvňöţzţčĆŮ!0;ůÔ‡z¦oâČKIh4Č lžś’!Ň<Ą’Îs‚§(xR(¸›—Ć6Bő]cyŁ^ôË+3ĎxŤ2zŐŇ9«§÷™°â)‹˘ŢwbOs[C(öôŢóÇ×˙ü{”X“nÜ|ĎÉň#čľoŢ’˝;ľ˙ţű[ĺą.ęUŰjŐn F™PŘoľ˙qÓÎSŰćÎ?vţšËŮB’+Aei®ť¦Žd€MHHh®!á®CIyȢI8±P D >ËăęŐ«¶ŁĹŔŔ&8÷Ŕă°Ĺ#H¸cť€ŮZ˙÷†cŹ,ÝT÷ę€YVîł-ß}eéŰRVÖSż=[Ě1ŹCŔěT8óŐ_Oe§îdÓyŐńđ´Çëć8k:YOY{-‹I©J?6 Úôůţü?ŢŁíŐť,L&~ç—TÎC9Ý1łç [u'Kó‰‹v…ÖĽŕňhIŔS°ˇÉb2!30OŮ÷đ„…'á)«¦ˇĄ¦ˇ)-YéVówůďť2 ŁÂ‡÷®ś˝ű8^ŃqřĚť÷6 3µśşmž»;lĎß{ß9ţ~íĎ]Jźř~^Xß7™źäF“— /¸±{`žn§˙ţ1٢>í-®ž+NžěŞöďĎ[¶ßSđXµăřw¶ŇŮćâčalŢľ”óň8¶-˛ íĺË—˙Ůş¶Č"3ĘŮ6cj@@ŔÖ­[[F} sđ{H°.•ü®-[2 mnÜ^†ćíŕ—¦ŢEŃż§5ŘJ­/<±ŕ°ut’šś‰ĺÔg||Ć÷Ť…ĐęłđTěÂÚó}źľúÜČ7iâçÓ¦ăă?˙aöÍ«·ßpţ`çWźSs»ÇIíś_¨şët§*ZOZsnŇšzM¤ö‡‘ąU|d|Ě\\\¤&„s+É—WTěÚąsé˛evvç"$‚Ř\RpÓéęłrĺJ]]]ˇfNOO‹‘'ĺiŽę€Ůsó7Ý…€Ůk?Ęi·ďăę1zÔô˝,U¸ł>ęÎÝL)n/ßú§:`¶«ať€Ůńze·zM©Ł‘se†o:'`¶]ť€Yą~ęOnL>Í ťeͱŇ>ęĚzޱ{VÜ ŘĚ 9Tʶ(| Şp77/99th,w? ¸Ůle~˝ôŤ9ösîşKYs#V¨Z}ȵpmPZKc.őÖę“’)V)z†¦d f[±ZPÝRU]3%5•çČ{xÂBô“PĚ DÔŐÇÁDY)Č”˘p×…(2 &‡µ™ę˘I6H}C [Iwg˝ń_™µ€ú|úÄI¦ˇSý!%ŐÁ ýeí[—Ţ8đ 8esf?śźłj߭ʦ‹˙ľtöč7ú¬Ţ!Ň€Ů=goćţ;÷ź·”f[¶dđťśť•ů wGŢÓśá>_Iř1nŰęÍ,ĆŰ@BT\o^ÜĎ- )š¦6yhŕh«ÔGđµĆ–RD`poĂPźšE2ń mí]î\°• _„Ľ§1&$8k uÚUÔÔI k›>fQ©˛’”Q¦H}ÚĚ}JzE[@}€÷Ŕk ©2bŔlkoTm}Ž!Śkęk0ňžÖ‚+•ţ`é…r¤â›ş2öc,,×>ĚăfO.{ł|ěĚEâ›OŘ‘+?<¸QJˇ?ň™řÝaÂă…ťIöŐ+’••%D"5EęC¤Ő@Yř! őt…đ…©ĄËIüN˘f[ąXÚz0Bfffăq0ž«1&D?µHËĘĘLĹÉ{Eý7˙Űĺ¶®#ń˛şWeŢń;ĄŮŞ‚ő?~¶?QÖ‹ËDáÖÄ*’‘ňŕRź˝gâ1‹”ë×–„ęę^¸“Ňl„„ľÂ~‡ş§© mŐr©krVś§ <ňžV!+•ÎŮŮŮ0Żš¦í=0>ÝzöBÓ#ŰÎÍÚ7òVMvQđ±e‹ýÓč´2¦Ĺ¬Ł?Znš}Ŕ¸—aZbD”⥓ďEĆôÜ˝÷'w5VĘ˝M ţ}WĚ.)7łmÓ"§ścSľ;óyHŢhĽďžĄZ~™sřuA%ÓĐó7ßĺó'ď2čať“ń‘a˙óÖMٕڙxeĺůÝýTĘ‚˙¶ĆöÔͱF4Jĺ‡KżnyóJeÍzýőýö|snÉąý”‡Ó¦đ”ä{ËWżŐĂU]l&Nőęáů1“Ę}ҲI‘ú´ 7ě%y¤>ÜŹ¤şéxŹä•©Á7óüBFŢCľ•.*âě8‰ßąGłçĘooNßđtÄa×Ď ± Ňä=wűNvQÍ9=ßk[ÄŻěÂTőqľ[Ĺüä5ůęGçČ]^8ęhÔŠŢvĎ~Ř_¸ôđĄ!šď÷Ś^rnŘ˝Żg_¸eĽľě‚ŰÓöć,9zkŚjôŽźÝËAe¤kxűííLŹ<0ŇçtĚzÍk;Ę[Ť\ęu‚ŃăĎőě©9Mă)ÉżŻĘŠa#®ä†*Şj wuŔźŕСC÷őáćD©»$Üß‘ú4ĆD†Ď_şt©©› ŠóŚĘ١>Ü'źůě=ÁžĽbPÁä>—Ť@ŢÓ˘źá>YÁŻYÜ‚ŇMĽżsŕçýo» ©™Š¦iˇ¶nůÄryZ^TžI)KIŐ®—™"ENÝPÇBĂNSŽZ®ŁIe”łËRž'=‰]8j3d",g”vÍŞ¤4$Đ&R˝W7M9 ­ăňż˙ˇ?<ŻnďaÍQËĚŃ$ďä'ń‡M«,ÇK’ěř§±Í‰ŃôB^QRŠaŘ…„źPE|ĎŐUéŠ M 9Ş›ĺËXO©s{)ż‘úw‚ ĺÁBGS[(`ŇŐŁH§h©Ř[ńäh–úpł¨Cv{bŞ&a©ęg•đä’žŽc› ś^N®žťyʤW˘őóqź¬Šâç=Š’íśMŽc~ĽdoW]˝2éżEG™ëţ;ă©U|c‰çżpęËýDĄ}ľł8ÎĐTŞ˘Őżă m?ßa}'®>÷yź‹nŕµg>´ăV]Ż…MázRĂi* u-ję°7_×”§$ŤÄh=üMŽ ¤Ěá= ٶĹÄ!ć=;‹w;˛IiZ}©O«!$Ó«gwęC&‰ëČĘźúpż0•«Í±x´)Ŕ:®]p¨«×cĚbsvhSčJVYnŤY%%±Ű{@-ŞFż‹ŐÎm{ZÄá#ĚâĚbu[k5Zeú}ż¨"fÓĺ¶”L{wČ˝uűĐśŠř«żýý¶PŢrÖŮł‚/Őü <°Ľ»•«mîĂWą,JEÜÎĹłö'1ŮŚđ1°‰Wű:I§“©"UIťť›Ă©Â—šQM™ůÉ1I%l9(űÎâR$~ŕ+š4Łů>üĆă{MQI®Ë9ßVdş^dZ­¶-+ź/.ď‘€ĽmݵWŞ&»%% C~‘÷qµřË…f8 Ş«¬˙ ŽţmśbB‡u(ŰLš©ylĚŚńvĄyĎr ßľáZ6oîCŐř×*‡ ˇő™ř÷{kđüEÁjÔ?ŹG}ľTüB7»jźg»{hôůöňŰú]§ůM3n/>Vźźvťë{Ô÷„ßęŤ%iVŚú´ć/99Á}™[ nxµ4ě"xnxÁcO^^B{$%’Ľ‘»Ó»ő†_ßůówYUνťËţŚUÖ,Ë yüőďt{•‚€Ń3/Řő3/‰ LµÚ]>7)ěťâô#;űÉż:Ľň·PşvUľ|ݵ{¦uR­űĘX•˙ěđ÷[ÂX´âR›éŰ~ďúÜkćys7kĄ˘¸hŐ‡ć•-X•±ýß…¶¬w«gírxżgőüěÜK#ë4;ňăpjÁó:łě™8íë :VĘ63~öÜP;ţý•ë 3ňăÔŮlűWĺDż—źá·nHĺť]^>9ŕ0–uőXÂgí¦h\˙mńţl+]ŐÜwsN®Roş†J‰o]¸d·®ďw.‰Ý âS­ÍŤĚ}˛R9ţ/x şĽ<±‘4H}D$#vSř”WPűÄ_&`ËO]ă´oă¶{˛ăžf2fłŽ,ůʬđňčůg˘Ľ×9S©l¦Ćčůzeě迦rĽß«đ<ĎDZ…ţôČé ďâöěđőł6źsđzďw3µĆŞ®hÓÓôĹ ÷ă‡gšú=Ď.ĄŇXęă–m¬’ĽwöÂÓ)3xk)WżYňŕĄÔ“ugąŇk“!çý랯ň÷~á~ěóř…IŹë6»Ük«LičĽő_ÉżýnúÎ÷%#†ößĂBiál×ô;:G–×h⤾-qě©#>&~§m«L®7Ýĺ!'¦éKäńE«¶Á7DŢĂűF!ňY:]6WMuŔi˙DľyŮ„·7?&AZ4 >‘ Ĺ@#Đ€úpÂy$ýť©`ę˝uÂÂ{^ěT-UA1űćúŤwU©źâK-¸®‰t 
s}y¸˘˘˘ĄˇC‡_”äĚňśŘOéÖý&O©*¤ËkkymŰ:±VĹ’7ßö rצQhÚ=}FPŘůńňÚÖ&Ŕé ŇV­řNa“űiu›Ýc¬? ŁR^ÇÖL™ZS 5şvü’ŕ#ő„aTŇ5-ŚÉ(ŞŇX•ŕ,ɵEŐ×®4?ą\ݻޅ®ăč¬w„Żl IŃăvĹ—;˘©Ę$˛ůmüIĄ3ÔjĎfe…,)%şTV”sWG6Ôi E]ęcjf&“:˘R˛@]ęu*jăB%§Ý|ÄĆ…OľŮřšĘ¶Ł”Fîţ5´˙ő]ٵÓ|żZĂ'şBA×ÎÔ˛ëźĚ´+OKĚP ^Zşâú§>Ł`ífˇš›ĹôP-zľ7@nĘ0Jen\j»“\vlşµ®r"3­ĽŠÂ.M˙XXĎó˛nłvZňňUőfQÓ{PýĹ%ŻŰ^ŁÎřŢń]\_;šš|njaEą 6,·rgŘúăXJ„ôÔŠÚ8ÖyŹu$ř)ůęÍ‚ç#8†âOI‰xu=D§m-őILLݨ8" zj©Ť&§ˇĄ'ú š‘fŮaɉzć˙ţ°‘ŃE§˛B“„Rżućrąľ®×Ě”.O©?ËĽšä-&opű2ľ®…}=a>7ű"Ź‚IwŤgŹŰhg®ŐnčÚYş[}Vľę K—SáPŚFJYđČĂÄŹÖťĄËsVc3<µń©ÖM„˝ĹŽŔíŰ·/\¸0ję|3ëöbź¬Ů ń˙Ť“°îŐ:W® ;˙ćÄI˙ŽůďÔt:űŠNž|fţéş>ËÍŽüą»4âÜ÷‹ÎÇSäY•şC˙Ü´ĽŹ&;ëůźł¶ŢI«¬ŇpůöřO^Dp«)/+ý÷ďµNNN ,xôčŃÉ“'猳"oţ>ëó*4÷ß‹ Áu.|šá%"#šYşq_ÄСC!>n×?Vt&oţ>8껕r÷e†‚˘âśŐżóiFÎKěü«cĹţĎwĄ_Š€ÍD†330(ĂÖµłNńYs®Í=¶µ'Ç4ŽŹq‘×Ož:uj˙ţýëÎ/ç"i(,ĂsŞŞrFY)'/°ôşĺ É†§^×HS•t*Ôfú`=ŃĄ).zö«ońĘý.nŃ»đs@ łěÝ˙ţ şőVđĺóK ¶­y’ËÇn,9„ĘK9‘«**’Č«$9­ÍTTÂĽý,ŁŃi<ŕvŔŃ9ű7 ˛l…%îTvúýőŢKçMZµGnâ˘îŇ"= ł’w¦Üç"Ú=ÓĽ<Ü'ki 1x…n4x¶ÉßF˙^ꔪÜcşz;'˙zLmůŇÍ3íąZÝăݠΨ^ZłĺK˙îk˘ČČ(bQ4Ę Jčúş*ĚÔÇ/ŐF,1W Čéy 7[w/şd`Ż/ÁďÍ#(žĺeŢŁ¦&Ëiaôl;›’Q˘§§Ç­Ź+,qTD µé{µ¸”Ą˘ŞJ­_¦ µCĄ?UkôŐ“Í #`łć°ä|űůÂh‹µY‹ăŐ8yŹXaËŕÜ'+׺ – „TÎ`đë˙] Ęw¨‘űřx˛ó2'jzZťňĄáŢ:đ“]pżŮňĄP­bí ‹SúÔ.Î2ůö¶‡^UXJˇŠ'€·˘’Ž%+*yIź÷”Ug•aŢSKz<==322÷đľ©ń,¨%=>>>wîÜ)(j±—2˘bG€ëËő­;îs‰z‘O ­­ cć‹|ä(§ď6˝sÔ©g…Ě´§g’\¦÷Ňâ”/ÝĽ|â…KvFĺĺ”V55pyuůŇ_Žę7uü’űŚŇ(_Úđ`<\ł=kő©Ç—1?±ř|bu›Ď[[ |5ě-Áż…y0›––l–?¬KzĽ˝˝%+N…‡@]ŇÓ«W/č̬ŞČ±pÓakÂ"ŔŤznc‹öÂ.Y“‚ďś…ů9M¶ôŞFŻŻ?ř2jć ĺ]“Ž5,_úE˘šäŁ5uFŐ ĺUľ4ď˝úWMä)rÚÝű'śK`ö˛Đ,NÉeR´čě’ě Ş‘!6ďůą &lI~ńχ¤Güă ˘A 1éÇ€ôô6â‘&ifѬ™ŘF©¬ÎöÂő­; Ú{Ĺ؆”`T`p¬ 9€řLč™xzŻ/Ăs†˝JĂňĄĚ϶ą†uF(_ęlŇ^/'0‚FٞİtíFj&}Ë®&•SXé·ý?ą|e/EmX IDATĎą/+P(ŁĽIA>e(Fł4&=Đ{,x¸ĘDÂ3fŇą‰˝·…~®Uţ~çO?® ĽYtř6`eü5ĹcîÔá>^s/ĆËHqĺ˛j/Xä=|Wž< Š ňŕĂL‘Ő'÷Iľ–?`U[!´*çâHź}qđ„(·zĘ‚»Ś‚çç_ş`ÚâÉłN„7Š€-˙p⏨ÇžĽľ˙Ýó[î4jŃ:ąĄÓ›őÜ÷ŕ>—tÖŁ•łšššFEEĺegčš¶r(QuWvŮv7©f0EĺKGúä\mTg´ůňĄTGďťë{”čt_yůěJQÉ.‚qŘ쪼¬ sŚE!ôf)PfhŠô@·ÚÄęšĎH’TÍŢËúl]?}—AĆCa=¦-iŻ¶Ň«¦6V¤§Oýő+d5(űŐÁű·µf*ô˛ü"&$ůcUŽͬ©É7)Ďiv’ýÓ8ÖyŃVJ y€÷@»ÜĚtâđä–ÝFąŮLf%w]jµ ‰)Č+$´u<ţ ł˘–:ŹĽŹHzd÷nĺˇŮŁ×Y*J˘KşĹc†ÖžR—ëÝMOQ‡oŇłŞ«s˛—µV‚ôWęä=…µćlĚW#®ť/›ľÁEűÖŽ/•żš±"›_Ů/N'öŚmŁ6ü˛j–ˇą’’˘5íxýONŘš3MKĺŐžřç`šü(ĎÄ"<öJ‰ŻJŘÚÚÂ+ZfZ˛Ń`&_™ĺ8ČŮÚĂ^ÝÉÂĐĂŮzPŻq˙ťś:ń»'­ĘxZůáÁµÚ`ŃČĚKÔK©ŤbÜÉwńáÚqHw§ĺűG2´^ČŽ•“‘Ú®];H1ĐúѤ8’)‚ŹS …€€¤Ć„Mg°ŽsÓk 56&5Üw<÷ą÷ueíěě@ôä„h+@7ă~/8ÁݧN_˙ŕ„{/.Îzňě?}[ců¬ĘĽăw*´T´±‘ĽDkʱ~U|¸z;˘¨z6váă·†Źßó$ňĆŤ_¨GşźŮp[§ő¦~ڇ„čÄićŻ)’ţřŕUâ 8é™Ářjdd”›E&â ,’äeg‚°ôŤuÁ-’Ć㌽˝=ĽÄ¤$D÷ě˙ŮĘ)6Á‹îŹźxv‰ďäĂs÷2LKŚR±´cň˝Ř°‚ž»÷ţä®ĆJ©_^ÔU)Ĺoă˘mQ,yfąîŔuŚŹoyóJeÍzýżWk]Z¶¸nÉRćĂiSx »ĹmŰěÝ= łs2>2ěŢşihńSľ;óą’…ĽŃxß=ßvákYyüB®ł§•BUë˝ă?g5] UĂĘBĄŚQĽGÄt?).°ďÔ©“ŘV@ě#é;Ä8Šôpç„€äädpmV“…Pvá(ëĂäfĄŠ bląJ#ď!ëâé“OdddiI‘˛Š$j€SiěÂTőqľ[Ĺüä5ůęGçČ]^8ęhÔŠŢvĎ”˝é°7~Ř©“‹íŘIţßi YęĺÇčńçúŞźä”,mbŘh'*#]ĂŰoogzä‘>§c†,ť}áâlˇ×KNI·şŽ3óÎÎ'&>»ŤEĎ'ÇGAPŚĄĄĄĐÂŁ’b¬JŃ<- =0¨±±1ü“ÁyăÉę­iK×N´ý—TóĐĘZ‹śĚtxJęëë7V ámŚ iÎtîÜ9""âcL„}·žZNŐ®—™"ENÝPÇBĂNSŽZ®ŁIe”łËŞË‹Ć.µö•ĘĄ]łŘ†ýG(Λ´4qÜW#'ŹiC©Ć)YşnůÄryZ^TžIuÉRŢĂ–UQŐí=¬9»kfŽ&y'?•S›Sł"éřĚ5ÇăËňc“X#˝}UŚĽn[ŇIR‘°bŮ.őĺ'fVď‰ňČÉL+ČËquu…˘i˘WRc!é‘Ň8OkHÍ(őV\Ę‚*ëÜ‚ŁŽČ}éŢcaŰQŔ.boĆJ»÷ëâÓqĹŚ‰Z®ť ”´=ţřmÁ_ë[:qeüŽUÇ]˙·ÁM4U{x‹×>űÖ™´îS]t9ŽŚU9÷÷¬Řř†©Z•/çüăŃeýôô ČÎËJ‡M.ž_ˢ~´tͰ_ €`éłgĎĆEĽ“ďˇ|ą‡¨´Ď·8Ç‹¦QyQ8÷Ó‘ÓC\˝ąôôŰOŕęW™ôŹ’ĄM E׹APtť*׸dé‘]Ë;×ßçR°đ9uʇRx}ÂęÜíű¦›U ÉĘşľtÉ˝•'7öŞţ¸ŠöŹ|:;;‹vX‰Ťö&‚“×ÄÓÓ«¬K sś¨e@rEř®–ôŔ\\[l槤–Í+–^4ăA›/˘”†Żë·ŰőÜľáZđŇuĚ×¶l·]=÷‚]?ó’¸ŔT«ˇÝĺs“ÂŢ)N?˛łźü«Ă+ ĄkWĺË÷Z»Ű«|×Ď˙ ˘ëĐŠJl§o™ĎŘuŕUŕ“cYWŹ%(k–eĐ<ţúwş˝JAŔč™PGGGľRú"’B/ ÷–‘č Uótuu3R?’K*•ÍÔ=CŻŚý×TŽ÷Ű`ţ癨B«Đź9ô]Üžľ~Öćs6.ůöíťçHM ÍT5×ßĂBiál×ô;:G–×”­ň^çÜÄhĄÝh¬ őqË6 VIŢ;{áń—î¤Ö$qR„ĘÜuëšo*RÎŻŮv=);ěU~äÔ`ÝŽśn«– U!w#­F-oJâé)śµ†¨gž"!ďá iNöčŃ#>>L>ťş»KOčęň˘g-vR‘]ˇ;ňźßŐi¶´{KÜýäU©L5—ŐG-ŰŰZ4©ÜÖ…35珙qËÚÚóëYN«·o¸¶cdS‚ÓÔM«üżł3!Őďďí¶Ínr}Gcřů5U¤\Ý÷*.'qäĂ?ŕ ÍxŘžkËşŠĆ ăe~JÎĎÍ‚M.ŘHnJ ž‡ř^đ{čÖ­›——a…DÁ@2µB"–QŁFąąąµ xE **,PÓĐlŮěE×0×—§PTT´4tčđ‹’“YžSżÖ„ţĚ]ănýnlb•ÍÄĺë;sÝX¨ ŠŮ7ż”­¨¶—ó 
®Čk[›Ŕ·*ř¨ßÓôÚ¶u Ě&lŰ:ˇäÝş™!>~>65vöŞ˘K7Ľuücădbů$Ą'ejÉ6P yź•&ÁĄž={ž?>ňí+‘óµ—î ¬A@mŕ…ëśß=üűVźi·üÄőę_äüůt@őoŤĘ‹:ŻÚuwUő%î1âŹ5/[űž-Ż9·pĚtÎo#ýTź¨?lńĂÉ»ŤÇ®ÚçѬ!«®¨5#ůźB»%oŢ.itZD'˘ŢÂHîîR$ť-׸Úúőë[Ţ{"’BŔÄÄdófplůŹ@ŕ=`ňQÓčŇňQ¤ŮSA·^­ E•äH§GǨ”Fţ9xű}ď €»8r÷ŻĽĘVđ”»27.µŚÝI.;¶@Ý”qqéŠŰźj˛ź)p*WôiÎ+ Şŕůţĺ{T–Ůč˘E$Ďޞ°ĐĘĘ*\öĆĘ#ďiŚ ™Î@xůŔç9+-YßX¦Š*++bĂ‚á5”ě™{5J´î–Gjb¬MG’ňůúµ&¶Í \ř}Ëv5MZI‘ËŘéZ eÝ5žmE_ŰGĘjµlzŞÖč«'eŃŘ^NÉ 1ZZZŕÖ4Č{šB†Lçáé«©©ţć9d, “Üd–őÝó\ĆIf%PvD  !Ľ>ł««Ęŕ!«dĄ§ÂÖ˙Ä"Č{daőét:|ĘJ‹CžČ‚>„×!9>:=%ŃĹĹ…gŃ;‹Ź"mHx S>Äŕűˇ /B$§8¤çŕŁ#ň>ŕéŇ€ 7×ű—ŹĘËJÉ$79e |t^GŽl2ő9ŐB©GňŞK@bL¸ŚëنŐúH–Źs`ĽGFnÍŇóöů}Q‰¨j$D…@Â{ČśÔTr˘ Žr!mn=ř·u dT˙śŚOąŮ]»v…=>*"ďáÉ.őďßrÉ„>.ĚĎ%™čä—Ĺbľ¸ëa\ŁGŹ&ŹÔ()"€p€ęÜPł))> íâ2yCĆż˝š­–ĽGvVîřńăÁŠ fŮŃŠ`š@ě:ĐJđ¦ŠI0ŃPDh(ä_’qŐH(ł|îÜą×ÎőžIˇP‰)')¤‚”H©‰q°˝%ŕ‡ŠJ‰DČ'o˛E2"yň’ź”83ÂVו+W‚žvéŮJ–G0”¤@WYIńđáĂ!Ă Ý‘÷‚)Ű 4("""<<ô틇N˝K†††+W~±ä^l”@CDĆŹľŹOn]¬¶’2Yé)aožC9¦~ýúµ@ä=-ŤÄ]ŕľpáBČĽ’}Őďä¸$±2˘6ţřźyűâlo­ZµJ¨ĐŃI#!€tčŃŁ‡ŁŁ#Ľ~“Ž8«Ŕ@đÝŁ€ó6{úôé4Mŕ~_"ďihäîVźyóćAlvFJâEßťçEn}Z-}EyYŔ©ĂQďlllV®\©ˇˇŃę!qD P}^ źŢş„Î_ąđ×ϳҒáßŘ-•¶~ýú–őÄ^äEÜś!BrČűw±aÁúƦÚşäU§5’ççfůź<™šäââ–0ěhÍhŘ@HФâ„/FřJ+x»öŽ$ŐBćĹ.ĚËąuḊ˛Ę˘E‹Zw‚ĽGćď“&´łłÓ××÷îmTČkŤnlŢ®­•/łvŔ©C%ŚBČXĺ·Zf2m_Ľ€ ¤BŔÚÚ:,,,.:ÂĐÄ\SGźT˛· aa‡ëĆŮŁ…yŮP‚©59ô‘÷´‰ŰĄ)%ˇl{çÎťĂĂÂb#Ţg§§š[µ§Ë·‰Čmg{y?ŕéíËP{ ÂÜŻzMˇ„çD - Aݶ¶¶Ďž=KJ¶ďÚŁŤ|’heß˝xő>ĐÍÍ 2ҵFlä=­AOúB-7¸ŤRSSc"ĂcCßčšČüž±‡ž„¨X±bEűöíea!QDh5ÎJµż{űm»¶5x«ńăYi)÷®řAĐÉâĹ‹Á«53!ďi z2ŇvI{öě ŰŰaaˇ‘ďJ‹ĆÖP¶FFÔ«ŁFUUUČ«Gw.ť(fäCý^¨˛.x _ŮC5BĆ@˘ö>ÄD†)*)šZ6n€g$ʤęń÷Ű1(ŕ…iddÔJ¨Pδ•C`w™A¬>ľľľIIIjZý†Ť·´sŐ@ŘČ{p4µ´¦O›ÖĄKYŇuAQ!Ŕ`06oŢ\XX8fĆ"Cłv˘Çi)ě›g}?Ä„yyyµr‡‹+ňž–.„Śö‹Čť;wüýý+++-lě{5Z[ĎěşB€$$ ~)úôé3nÜ8˛+…ň#€řŹŹßşőeUµ sżUVÁÔíâCşů‘!ł$•…Ś»P;H$ŽČ{š˝ ¶ČČČ8sćLxx¸ŤÖÉĹÝŮ}|ţÉ“YöúYđÓ»`&577ź4iD±‘Q”@$ŚŔÝ»wĎť;[]^>‹á›PÂłăt\ ęöćy_m-­µkׂ3†H`AŢ#esŕ=đ±OKK“WPěÔÝ˝[ŻJʢąí$€‹ĹŚ|ű*řŮÝbF!TÚ5jd4Çâę@§@dăÇŹCx—]'gOŻ©čă,ůeÍÉL»ě»KNŽúý÷ßC2}Q €ĽGTHĘć8°íőâĹ‹k×rssäĺě»őěęÚ_]K‡ČÚ‚i'"řEHŕ“’˘B(YEáŔ„„D^2”  &L&sÇŽ111®ĂÁěML!eUŞŇâ˘óG¶Á×8”•„D»"TyŹÁ”١ŕĂ/=·oßÎÎΓ‰U‡NνLŰىd«U„¨ÁË0ž×ĺ`íßż?0Q™FE('… dA|ś˙üsKVvV˙áśÜČ"6Ůĺ„Đ­ËÇvĂW:”Ś€úTTT ź<×Ě 3ťŠŕ/B2ý[Ž;ł««ëěŮłĹńeŽĽGŚë'ŰC———‡„„ű3l„˛ŕ÷cnÝÁÄÂÚŘÜJMS[ę——–¤%HKJHN(Lź ¨ĘŰ˝ú€\«âÇDD"Ű·oß^Ŧ óžmfŤÔG,wžŰʇpëS$ ň±,^›´´´¨!ˇˇE WwČ|h`b Î?#uMm(}*,,P…®¨ ?/'(NNĆ'H<żCö°ěíí!ŁÔĚĺÂŽŚíD¨¨¨={öT±ŮC&Ě ·°Ý±= ÷Ö9ߏq‘đĹy™éb«€Ľ‡˙BŕU!€ÜßÉÉÉđV?óňňľt¦RUŐ44´t ’Š*ÄĂ+(*——WPâ¶D;,f%ł˛˛¬´¸¬´|ř‹ óáĐ˙ÚA Ů vŕ€ÚK^|ź !tƦ"Đ–ŤŤÝąs«Š5düLËh/Ýe¬¬¬¸uŢ79>,=óćÍë×;ň鮵,Ď^TT”’’űâééé–““˙±ŞwÄš=€iiiéU†††ąBÇĎ7«6@@ŢëvíÚU^Q^»ąĘ€FRWRę_?sSśťťçĚ™#VŇĘ"ď‘úŠ·-`S âB‹‹‹Á'šĹb“W¸Ńˇ<*viá§8ÜŮÚÖ¨-"€(bÔ§ÁčŃop÷ľ"ޞŹČÄ•‘ź{íÔÁüś¬ľ}űNť:UL>=uőGŢCÜ»%CD &`Æ”†âŢŃɵßĐńXȢeË”‘úń湣%EŚ‘#GBVý– "l/ä=Â"†íD@ ®ÁÍůÇFćVC'ĚTVĹxRá÷Źo\€P•)S¦€±G¸Î­hŤĽ§ŕaWD@6Ś@eeĄźźóVň201oĂ`ˇ:dÁ}~çJhĐS55őůóçµo/ŃĽČ{„X*lŠ "€4@ŕŢ˝{çĎź§ĘÉő4˛sŹ>XÁ”˙RTwç҉ô”DsssW‡ űüŰ‹ü*ň‘CŠ""€´-˘ŁŁŹ9RPP`ŐľÓ€Q!UGŰŇ_`m!'á˙ÓP=şwďް˝±,wYCä="BDhł€»ŹŻŻoXXěy 1ŃܦC›…‚§âPjôĹ=˙ŕ—JJJ·e(x6“ŔIä=§@D} wëť;w®\ąÂd±ě»öp˙j $h•}µĐ>ş~ŽQgii9wî\:‰« ňq!‹ă""€´A>}útěرÄÄD0üô6®ťťcˇVeČIřâţµ¨÷AtmÄC‡•@†ţ€#ďáŹ^ED@„CjxjIDATâ•Ŕđăďď_¶ű öŇÔŃnň·ëWDđ‹Ŕ‡7ˇúTňńń111!‚ZČ{° ("€ €¬!Y Ďž=JŁŃşőň€mgŰ+-)áéíËPL’ďŹ3¦_ż~ÄIÁŹĽGÖ>i¨"€ q 9{ö\VV&”dvväčŇ›N—'Žx"—$;#őŐIq‘@túôéăĺ奇D>KkDŢÓô°/"€ "Đ °ŰőŕÁ[·nAµfpúöcß­'ŤFo¦Ů.çfĄżyz7>âěp988Ś72ôP ä=\ @D@Ö€ŞĚŕôs÷î]¨Ç¬˘¦ŢŵżŁs/ŮŘůĘHI ~~?16ŠNX[[Ź;VÂ)…şW÷6FD@Zޤůą˙ţŁGŹŠ‹‹ôttrspvÓŇŃoůŇëYĹb%D‡†˝~Ţ< p×rt$züňéÝ283"€ @›D ¬¬ěéÓ§`űÉËËŁP©¦–6Ž.îíÚ;eó«0/'âíK(,ZZ\~<]ştĆ–R,&ňR, ‰ " k@¸űű÷ďź1ŠJĘÖö]ě:9›XÚ'ú©.č%EŚřČw±ao3>%Á––†††»»;”R—|Ť­ÖÜ Č{ZöED@Z‹@VVÖłgĎsrr`,5 °ý@â3«öňňR¨`Ő@źüśLđÝ­OI ěŞ*ˇÍ,77·nÝşÁď­U^âý‘÷Hrś@Dh„|‚‚‚‚ˇÄ)\§Ńé&6¦ílŤ-¬ ŚÍĺ$H2Š…iÉ ŕ¸“UÇacgŮÖÖ¶gĎžÎÎΕ§‘ř¤9Ľ‡4K…‚""€´€}üřJśBÎCřţ­!돩…‰…®±®ˇ‰¶žśś(m-Ą%E9źŕ_vƧŚÔŹąŮ\¨â€u§SőAjşS{ç ďi "Ô@D€”@üW|||LLL\\\rr2¸qŐ€ & m= -um] -]H ¤¬˘ ©•”9?›ŞUYQ^ZR\VRełJŠ‹ ósůą…đ/7»¤Q şşşťťXwŕ§™™YSŁ‘P yIĹFDh[@â(zšR}Ż Â¦  Ë+Ôuľ©(/ăÚŤ·§ÓézzzP<ËÔÔ2 
ÂOřłq3™9ĽGf–ADh[Wh @ůůů H ?Á,—X,V-ŠŠŠ@ jÄaÁO8  (Ž––1ĂÇÄ´–Č{Ä,‹ "€ „C@Žpˇ@"€ "€ä=âÁGED@â!€Ľ‡xk‚!"€ €xř?! ›]ý<IEND®B`‚ceilometer-10.0.0/doc/source/contributor/events.rst0000666000175100017510000002715713236733243022445 0ustar zuulzuul00000000000000.. Copyright 2013 Rackspace Hosting. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _events: ============================= Events and Event Processing ============================= Events vs. Samples ~~~~~~~~~~~~~~~~~~ In addition to Meters, and related Sample data, Ceilometer can also process Events. While a Sample represents a single numeric datapoint, driving a Meter that represents the changes in that value over time, an Event represents the state of an object in an OpenStack service (such as an Instance in Nova, or an Image in Glance) at a point in time when something of interest has occurred. This can include non-numeric data, such as an instance's flavor, or network address. In general, Events let you know when something has changed about an object in an OpenStack system, such as the resize of an instance, or creation of an image. While Samples can be relatively cheap (small), disposable (losing an individual sample datapoint won't matter much), and fast, Events are larger, more informative, and should be handled more consistently (you do not want to lose one). Event Structure ~~~~~~~~~~~~~~~ To facilitate downstream processing (billing and/or aggregation), a :doc:`minimum required data set and format ` has been defined for services, however events generally contain the following information: event_type A dotted string defining what event occurred, such as ``compute.instance.resize.start`` message_id A UUID for this event. generated A timestamp of when the event occurred on the source system. traits A flat mapping of key-value pairs. The event's Traits contain most of the details of the event. Traits are typed, and can be strings, ints, floats, or datetimes. raw (Optional) Mainly for auditing purpose, the full notification message can be stored (unindexed) for future evaluation. Events from Notifications ~~~~~~~~~~~~~~~~~~~~~~~~~ Events are primarily created via the notifications system in OpenStack. OpenStack systems, such as Nova, Glance, Neutron, etc. will emit notifications in a JSON format to the message queue when some notable action is taken by that system. Ceilometer will consume such notifications from the message queue, and process them. The general philosophy of notifications in OpenStack is to emit any and all data someone might need, and let the consumer filter out what they are not interested in. In order to make processing simpler and more efficient, the notifications are stored and processed within Ceilometer as Events. The notification payload, which can be an arbitrarily complex JSON data structure, is converted to a flat set of key-value pairs known as Traits. This conversion is specified by a config file, so that only the specific fields within the notification that are actually needed for processing the event will have to be stored as Traits. 
Note that the Event format is meant for efficient processing and querying, there are other means available for archiving notifications (i.e. for audit purposes, etc), possibly to different datastores. Converting Notifications to Events ---------------------------------- In order to make it easier to allow users to extract what they need, the conversion from Notifications to Events is driven by a configuration file (specified by the flag definitions_cfg_file_ in :file:`ceilometer.conf`). This includes descriptions of how to map fields in the notification body to Traits, and optional plugins for doing any programmatic translations (splitting a string, forcing case, etc.) The mapping of notifications to events is defined per event_type, which can be wildcarded. Traits are added to events if the corresponding fields in the notification exist and are non-null. (As a special case, an empty string is considered null for non-text traits. This is due to some openstack projects (mostly Nova) using empty string for null dates.) If the definitions file is not present, a warning will be logged, but an empty set of definitions will be assumed. By default, any notifications that do not have a corresponding event definition in the definitions file will be converted to events with a set of minimal, default traits. This can be changed by setting the flag drop_unmatched_notifications_ in the :file:`ceilometer.conf` file. If this is set to True, then any notifications that don't have events defined for them in the file will be dropped. This can be what you want, the notification system is quite chatty by design (notifications philosophy is "tell us everything, we'll ignore what we don't need"), so you may want to ignore the noisier ones if you don't use them. .. _definitions_cfg_file: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html .. _drop_unmatched_notifications: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html There is a set of default traits (all are TEXT type) that will be added to all events if the notification has the relevant data: * service: (All notifications should have this) notification's publisher * tenant_id * request_id * project_id * user_id These do not have to be specified in the event definition, they are automatically added, but their definitions can be overridden for a given ``event_type``. Definitions file format ----------------------- The event definitions file is in YAML format. It consists of a list of event definitions, which are mappings. Order is significant, the list of definitions is scanned in *reverse* order (last definition in the file to the first), to find a definition which matches the notification's event_type. That definition will be used to generate the Event. The reverse ordering is done because it is common to want to have a more general wildcarded definition (such as ``compute.instance.*``) with a set of traits common to all of those events, with a few more specific event definitions (like ``compute.instance.exists``) afterward that have all of the above traits, plus a few more. This lets you put the general definition first, followed by the specific ones, and use YAML mapping include syntax to avoid copying all of the trait definitions. Event Definitions ----------------- Each event definition is a mapping with two keys (both required): event_type This is a list (or a string, which will be taken as a 1 element list) of event_types this definition will handle. 
These can be wildcarded with unix shell glob syntax. An exclusion listing (starting with a '!') will exclude any types listed from matching. If ONLY exclusions are listed, the definition will match anything not matching the exclusions. traits This is a mapping, the keys are the trait names, and the values are trait definitions. Trait Definitions ----------------- Each trait definition is a mapping with the following keys: type (optional) The data type for this trait. (as a string). Valid options are: *text*, *int*, *float*, and *datetime*. defaults to *text* if not specified. fields A path specification for the field(s) in the notification you wish to extract for this trait. Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish). This is normally a string, but, for convenience, it can be specified as a list of specifications, which will match the fields for all of them. (See `Field Path Specifications`_ for more info on this syntax.) plugin (optional) This is a mapping (For convenience, this value can also be specified as a string, which is interpreted as the name of a plugin to be loaded with no parameters) with the following keys: name (string) name of a plugin to load parameters (optional) Mapping of keyword arguments to pass to the plugin on initialization. (See documentation on each plugin to see what arguments it accepts.) Field Path Specifications ------------------------- The path specifications define which fields in the JSON notification body are extracted to provide the value for a given trait. The paths can be specified with a dot syntax (e.g. ``payload.host``). Square bracket syntax (e.g. ``payload[host]``) is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so: :: payload.image_meta.'org.openstack__1__architecture' The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for more info) Example Definitions file ------------------------ :: --- - event_type: compute.instance.* traits: &instance_traits user_id: fields: payload.user_id instance_id: fields: payload.instance_id host: fields: publisher_id plugin: name: split parameters: segment: 1 max_split: 1 service_name: fields: publisher_id plugin: split instance_type_id: type: int fields: payload.instance_type_id os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: - compute.instance.exists - compute.instance.update traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending Trait plugins ------------- Trait plugins can be used to do simple programmatic conversions on the value in a notification field, like splitting a string, lowercasing a value, converting a screwball date into ISO format, or the like. They are initialized with the parameters from the trait definition, if any, which can customize their behavior for a given trait. 
They are called with a list of all matching fields from the notification, so they can derive a value from multiple fields. The plugin will be called even if there are no fields found matching the field path(s), this lets a plugin set a default value, if needed. A plugin can also reject a value by returning *None*, which will cause the trait not to be added. If the plugin returns anything other than *None*, the trait's value will be set to whatever the plugin returned (coerced to the appropriate type for the trait). Building Notifications ~~~~~~~~~~~~~~~~~~~~~~ In general, the payload format OpenStack services emit could be described as the Wild West. The payloads are often arbitrary data dumps at the time of the event which is often susceptible to change. To make consumption easier, the Ceilometer team offers: CADF_, an open, cloud standard which helps model cloud events. .. _CADF: https://docs.openstack.org/pycadf/latest/ ceilometer-10.0.0/doc/source/contributor/plugins.rst0000666000175100017510000001311413236733243022606 0ustar zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Writing Agent Plugins ======================= This documentation gives you some clues on how to write a new agent or plugin for Ceilometer if you wish to instrument a measurement which has not yet been covered by an existing plugin. Plugin Framework ================ Although we have described a list of the meters Ceilometer should collect, we cannot predict all of the ways deployers will want to measure the resources their customers use. This means that Ceilometer needs to be easy to extend and configure so it can be tuned for each installation. A plugin system based on `setuptools entry points`_ makes it easy to add new monitors in the agents. In particular, Ceilometer now uses Stevedore_, and you should put your entry point definitions in the :file:`entry_points.txt` file of your Ceilometer egg. .. _setuptools entry points: http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins .. _Stevedore: https://docs.openstack.org/stevedore/latest/ Installing a plugin automatically activates it the next time the ceilometer daemon starts. Rather than running and reporting errors or simply consuming cycles for no-ops, plugins may disable themselves at runtime based on configuration settings defined by other components (for example, the plugin for polling libvirt does not run if it sees that the system is configured using some other virtualization tool). Additionally, if no valid resources can be discovered the plugin will be disabled. Polling Agents ============== The polling agent is implemented in :file:`ceilometer/polling/manager.py`. As you will see in the manager, the agent loads all plugins defined in the ``ceilometer.poll.*`` and ``ceilometer.builder.poll.*`` namespaces, then periodically calls their :func:`get_samples` method. 
Currently we keep separate namespaces - ``ceilometer.poll.compute`` and ``ceilometer.poll.central`` for quick separation of what to poll depending on where is polling agent running. For example, this will load, among others, the :class:`ceilometer.compute.pollsters.cpu.CPUPollster` Pollster -------- All pollsters are subclasses of :class:`ceilometer.polling.plugin_base.PollsterBase` class. Pollsters must implement one method: ``get_samples(self, manager, cache, resources)``, which returns a sequence of ``Sample`` objects as defined in the :file:`ceilometer/sample.py` file. Compute plugins are defined as subclasses of the :class:`ceilometer.compute.pollsters.BaseComputePollster` class as defined in the :file:`ceilometer/compute/pollsters/__init__.py` file. For example, in the ``CPUPollster`` plugin, the ``get_samples`` method takes in a given list of resources representating instances on the local host, loops through them and retrieves the `cputime` details from resource. Similarly, other metrics are built by pulling the appropriate value from the given list of resources. Notifications ============= Notifications in OpenStack are consumed by the notification agent and passed through `pipelines` to be normalised and re-published to specified targets. The existing normalisation pipelines are defined in the namespace ``ceilometer.notification.pipeline``. Each normalisation pipeline are defined as subclass of :class:`ceilometer.pipeline.base.PipelineManager` which interprets and builds pipelines based on a given configuration file. Pipelines are required to define `Source` and `Sink` permutations to describe how to process notification. Additionally, it must set ``get_main_endpoints`` which provides endpoints to be added to the main queue listener in the notification agent. This main queue endpoint inherits :class:`ceilometer.pipeline.base.MainNotificationEndpoint` and is defines which notification priorites to listen, normalises the data, and redirects the data for pipeline processing or requeuing depending on `workload_partitioning` configuration. If a pipeline is configured to support `workload_partitioning`, data from the main queue endpoints are sharded and requeued in internal queues. The notification agent configures a second notification consumer to handle these internal queues and pushes data to endpoints defined by ``get_interim_endpoints`` in the pipeline manager. These interim endpoints define how to handle the sharded, normalised data models for pipeline processing Both main queue and interim queue notification endpoints should implement: ``event_types`` A sequence of strings defining the event types the endpoint should handle ``process_notifications(self, priority, message)`` Receives an event message from the list provided to ``event_types`` and returns a sequence of objects. Using the SampleEndpoint, it should yield ``Sample`` objects as defined in the :file:`ceilometer/sample.py` file. Two pipeline configurations exist and can be found under ``ceilometer.pipeline.*``. The `sample` pipeline loads in multiple endpoints defined in ``ceilometer.sample.endpoint`` namespace. Each of the endpoints normalises a given notification into different samples. ceilometer-10.0.0/doc/source/contributor/measurements.rst0000666000175100017510000000772613236733243023651 0ustar zuulzuul00000000000000.. Copyright 2012 New Dream Network (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _measurements: ============== Measurements ============== Existing meters =============== For the list of existing meters see the tables under the `Measurements page`_ of Ceilometer in the Administrator Guide. .. _Measurements page: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html New measurements ================ Ceilometer is designed to collect measurements from OpenStack services and from other external components. If you would like to add new meters to the currently existing ones, you need to follow the guidelines given in this section. .. _meter_types: Types ----- Three type of meters are defined in Ceilometer: .. index:: double: meter; cumulative double: meter; gauge double: meter; delta ========== =================================================================== Type Definition ========== =================================================================== Cumulative Increasing over time (instance hours) Gauge Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O) Delta Changing over time (bandwidth) ========== =================================================================== When you're about to add a new meter choose one type from the above list, which is applicable. Units ----- 1. Whenever a volume is to be measured, SI approved units and their approved symbols or abbreviations should be used. Information units should be expressed in bits ('b') or bytes ('B'). 2. For a given meter, the units should NEVER, EVER be changed. 3. When the measurement does not represent a volume, the unit description should always describe WHAT is measured (ie: apples, disk, routers, floating IPs, etc.). 4. When creating a new meter, if another meter exists measuring something similar, the same units and precision should be used. 5. Meters and samples should always document their units in Ceilometer (API and Documentation) and new sampling code should not be merged without the appropriate documentation. ============ ======== ============== ======================= Dimension Unit Abbreviations Note ============ ======== ============== ======================= None N/A Dimension-less variable Volume byte B Time seconds s ============ ======== ============== ======================= Naming convention ----------------- If you plan on adding meters, please follow the convention below: 1. Always use '.' as separator and go from least to most discriminant word. For example, do not use ephemeral_disk_size but disk.ephemeral.size 2. When a part of the name is a variable, it should always be at the end and start with a ':'. For example, do not use .image but image:, where type is your variable name. 3. If you have any hesitation, come and ask in #openstack-telemetry Meter definitions ----------------- Meters definitions by default, are stored in separate configuration file, called :file:`ceilometer/data/meters.d/meters.yaml`. This is essentially a replacement for prior approach of writing notification handlers to consume specific topics. A detailed description of how to use meter definition is illustrated in the `admin_guide`_. .. 
_admin_guide: https://docs.openstack.org/ceilometer/latest/admin/telemetry-data-collection.html#meter-definitions ceilometer-10.0.0/doc/source/contributor/ceilo-gnocchi-arch.png0000666000175100017510000034212313236733243024524 0ustar zuulzuul00000000000000‰PNG  IHDRdŻKrÍübKGD˙˙˙ ˝§“ pHYs  šśtIMEß  &în„i IDATxÚěÝTSg˘/ü/¶‚%€¶¸ľ@e’VJćBµŐA8k¤wJśŃU{PťsđV<÷¬Z;Ö÷p‰ëŽuťĄí]«Đ÷Ř™ńÇ{çT}ĄóÚ#JG—ÜŐ0ëÎ )Ő¶ śF:ˇXBŰ5$Thßćý#<Ź;ÉNż!|?kąJ“ě';ĎŢĎŢ;ß<űy˘<ŹDDDDDDDDDD4éć± ¦Y"""""""""˘)Â@–hŠ0%"""""""""š" d‰¦ČýÁž8wîl6[Řmٲiii!_Ă2Y&Ëd™,“e˛L–É2Y&Ëd™,“e˛L–É2Y&Ëd™3ĄĚĹ‹ăé§źĆT Čvuuˇłł3ě‚ěv;4MČ×tvv˘««‹e˛L–É2Y&Ëd™,“e˛L–É2Y&Ëd™,“e˛L–É2§­Ě/ľřííířâ‹/°lٲĂۉ4u»ÝŁ*¨˝˝7oŢ ůšţţ~–É2Y&Ëd™,“e˛L–É2Y&Ëd™,“e˛L–É2Y&ËśÖ2Ń××ŔTŠňx<µ'®\ą"W*‹-ÂüůóCľćÎť;b™,“e˛L–É2Y&Ëd™,“e˛L–É2Y&Ëd™,“e˛Ěi+SČ–––"##S%h űţűďăöíŰ """""""""Š$ÓČ YĐÝÝŤ·ß~N§±±±xüńÇą…&@@ ;00€ŽŽ@bb"kh‚ĚcM ˛DDDDDDDDDDS„,ŃąźU@DDDł‘Őj…ŰídggĎéşhmm•Ďőş—ËĺBgg'@ŁŃ@Ż×łRhJ0%""˘Yٵµµµµ0›ÍĎĹĹš  ………HNNŽřş¨ŻŻGmm-¬VkŔsz˝(((@\\\Čr¬VëŚ #×­[(..ƶmŰ&Ľ|—Ë…wŢy&“ v»=ŕůěělŤFĚşýáěŮł¨®®ÔŐŐń`ADDD41%""˘ĎĺráđáĂ>=AŐ^S[[‹ÚÚZl۶ ĹĹĹ[ű÷ďW b«Ő «ŐŠÚÚZĽüňËŞ«ÝnGUU<Ž9ńűŐjĹţýűárą‚ľ¦µµ­­­0™Lxůĺ—G ł‰Ć‚,Íhv»‡–dRR ˇ×둝ť »Ý»Ý“É„úúzŢŢ‚V«ڏúP†±YYY(,,D\\4 Ün7\.Ş««ŃŮŮ »ÝŽýű÷ăÍ7ß +++ŃÖÖ†¬¬¬9±)ĂŘ‚‚äććúÔ›ÝnÇŮłgŃŰŰ‹ÖÖV>|l€DDDDęńÇGtt4 ŇŇҦô˝ČŃŚ¦ c PVVćó|rr2’““‘ťťŤm۶áСCčěě„ŮlƉ'°sçΩ “ɲ.„ĽĽA«N§CnnnĐ÷S~ΑdggË^˛ţë000ˇµN§ (×n·Ł­­-`¬¬¬¬°zŹÚívÍfźúÍĘĘÓDbbÝ{=ŁĂ!ęMŁŃŚřÚääd$%%ˇ··7äë\.ÚÚÚ|¶]rr2VŻ^tŰř×s}}˝ü<a6—Ë%ßWĽVY7ˇ¶‹˙ţ6Ň:Źw9«ŐŠ÷ßLűQ¤ dÓŇŇPZZ ‹Ĺ‚ˇˇ!ÖM‹¦¦&$†ęŐ¨&;;:ťN]ŕrądpd2™P]]-Ç_}ýő×&zŞ®®Fqq1 UË5ÉŘŮłg‘ťť­:)”ň˝wî܉×_]ur.˝^Ź—_~9 ”ËÎΖ˝=›šš——˛ĘĘĘ‚l±‚Őj•áuEE… Ę\.Nž< “É´üäää “†Ť´ĽŃhÄóĎ?öÄYĘIąt:ݨĆwÍĘĘB[[>ýôSŘíöÜÓ§OʏožLDDD‰îű§ú§R>0ţ|,^ĽwîÜaíŃ´ihháăŽ;Âî )ÜşuK.˙ŘcŹÉú[[[ŃÖÖ†¨¨(466Âĺrˇ  xě±Ç`·ŰqëÖ-|řá‡p»ÝXąrĄOą.— »wď–˝u:ÖŻ_ŹěělÄĹĹÁfłÁn·ăŁŹ>š5k-—ďŹúúztwwC§ÓáÉ'ź„N§Ëĺ‚Űí–ëľ~ýzź÷Öh4¸rĺ  ±±n·iiia‡š€·—©xźŻżţŤŹ=ö’““‘››‹‡z€w¬Ők×®đšFŁŮŮŮĐëőřË_ţ·Ű ·ŰŤ?ü0 ¸vą\Řżż\^YGĐŰŰ‹ÎÎN|ôŃG>źQŠŮŮŮ>=(ŐÂŘŃ|f—Ë…?üCCC¨ŻŻGtt4|đÁQ•!L&TTTŔívCŁŃ ??ąąąr‚ą[·nÁl6Ëϡ$>źŰíFCCĎsůůůxňÉ'ńá‡âÖ­[>ŰÂß믿Ž[·nA§ÓaË–->ű€€±dM&>°Îz˝ÝÝÝp»Ýhll„N§ó™ĐBůYď»ůůů>źµµµ˝˝˝˝ş_~ůeĆ*—ˇłxOŁŃ8¦í@DDD4,X€ÔÔÔ)_Y@DDD3’˛gťN§őňŮŮŮ2ëěě č‰'züůOňTXX('vŞ­­EaaˇO|řđaąěóĎ?ŹŤ7¬÷ˇC‡`µZQUUĺ3&© zĹú/ďrąPUUłŮ «ŐĐ VŻ×ُ¸X~®ÚÚZÔÖÖĘIͲłł‘••2Ľ6Ť0ŤŘ·oÚÚÚ ÓépäČź×('S›kçÎťrŇ0»Ý°žŐŐŐ!—żxń"Nž< «Ő “ɲôxĂXظq#ÚÚÚdoé'NŕĉĐëőňöů¬¬¬˵Űí˛gŞN§ĂÁ}ęş°°Pnżęęę€`Y0›Í>Ë‹şJNNĆÉ“'xÇýUëyl·Űĺëőŕö˝(S­ţŠ‹‹ńÓźţ˝˝˝xýő×ĺvô_®´´Ôg}¶mŰ&÷“ɄիWËe­V«lżjmD´1·ŰŤłgφ= Q¤ŕ¤^DDD4ăMVşâââ€00..rĽŃłgĎĘç”ASaaa@Đx`”™ÍfŐ[˝-çsű·rĚNa۶mxůĺ—}ĆCµŰí0™L¨¬¬DII JJJpâĉ ď=q+}RRRаT*×ÓĺrÉɰ T—߸qŁ ŮĹ8Áj&"Ś<pk˝ŐjEmm-:„˙řÇŘ˝{7jkk†°Äľ ŃhÂX±ýJKK}&• µ>by˝^˝^ʏ¸8ŮÓTô˛ ¶m4Mᆕ”źçŕÁő''są\hjj XÎ?Ś”Ç˝őÖ[ňqĺŹ)jŰ_Ż×Ăh4"++kL?¶Ívě!KDDDO9é–R°†"«ŻŻ÷™H’…ęťh4e/Ö¦¦&Őŕ6XÎdWyyyČÎΆŮl†ŮlFkk«Ďg´Űí¨­­E}}}Đ1>C9räÚ `ë9R'0Xoމ c…m۶ˇ°°&“IöőO«ŐŠęęjŐ1UĹľ n˝¶ďäĺ塶¶6h¨ŞÓé‚._PP üÖÖÖ€¶őőőr˙ §>Ä0ˇzNçĺ塴´Tö´Vîë#MŔ&öu«Ő*ÇjVľţäÉ“Şc‹íODDD41%""˘§ěM*™îá•Ëĺ’A“2lTÎjJ[[[Đž´ă'‡îőŢmkk“­ËĺÂŮłga·ŰQZZ:Şň“““<«Ő*Ç 6Y“˛·l¨ĎęąÎÎNĽóÎ;2.,,ś°^ŇqqqظqŁÜ&­­­ňź/Ĺ„TĘáDŕ(Ęöůý©…ŞjűŁ——‡¤¤$ôööÂd2ŚĄ+öąp'şĂŚ´żů—'Ţg¤ĎŞÜ.ťťťrŤF·Ű “É$?Gnnî/ŃTůă˙üáŔ–-[|ĆŇźl d‰hFJNNö ČFČ)ĂRµ,T(řŽ[+‚&Ąňňň°Ö#T/Ó‰$n{߸qŁ6@ôŇ5™L(((u\__ʦ¦&tvv†=üA°ŢČŁáßłôäÉ“a÷-ĺ8Żv»gĎž•˝P•ď«ü\"dĎ{†"zŘ*{g÷†@HJJw Š2€˝°G»ŻWTTŕСCčíí•eŠrĹrţă3MĄ;wî ŻŻ_~ů%¦ô˝ČŃŚ¤ D[[[nI¸=5ÇBŁŃ„=öĺDö´Z­p»Ý#~ž¸¸8l۶ ŮŮŮ286›Ía×ËĺÂţýűeďJ˙Ď-z;îŢ˝[uىÚţĹĹĹ8|ř°śěLm‚´p´¶¶BŁŃڏ-’““QVV†äädTWWĂĺr©î{:ťnÄ@_YgŁUXX(ÇpUNz&Úp{ÇNĶHJJ ;4U†Öz˝§Oź†ÉdBkk+Ěfł|^ ©Q[[«:éQ¤c KDDD3R^^žĎŚóŁ dE/Ǥ¤¤1…˘Ę@×?|Őét8räČ”ÖÇĹ‹e}TTT„®*o÷WC9|ř°|}AArss‘ťťVŐ‰čń¨3¶  őőő0›Íhjjő~PYY)÷…şşş°–ÉÍÍ•˝‹;;;Ţł°°pRCÄäädčt:tvvÂl6Ăh4˘©©I¬áľ÷X{+·ˇŃhĶmŰĆüYü‡Ő=nEűŞŞŞ 9&/Q$šç˙@ww7*++QWW‡?ţńʬ!"""šÉÉÉČĘĘ9qU¸Îž=+Ă«`ާź~˛ q‹ľFŁ‘Á–ŤÚÚÚFě}ŘÚÚ:ˇĂ(CĺŃÔ…č•nŕ%&“ĽÁdYYňňň½`C({„† M&ĘËËQ^^POĘá vîÜ)Ë|ýő×G]§ĘĎ=šzó˙<ţCXŚT‡Ł ŔŐIăĚf3\.— 
öËŽ)OČ–ţX…ŞšT1˘öT¶®S ·oG –„ˇj˙÷ť¶_(FÝ—x,ď˙ŕő——bë:%Ţ{3 ą/UcíkAŢĘEX–&Eí—&Ôžę@ć)ŢزÔë1ÚşN‰Ú/;PqÄŞšŰs¸űűÇŇŇnFËu3^y)w2"/ęjV&Ý3›Í·ć±X,\[ů0¶3‹ Y""""""“űT˘%a¨=eÂ[ŇťnËYŹE‰b—űLäúłý3*ŽPqÔßTśÇ˘D ~°bJ·©°hsĎÁŞýßGéďôč5 ‹j-K‹ĹŮŞFé>=Z®›ń›ŠóX–&ĹÎm*Ľ±ĹąÝÁ˛q¤9Ëăťn_”(FÎňx·=tGo żÎEîňxÔ~ŮřÍ›‹đĆ–t”Z·żĎŢ?vë:w2"/`{ďłWĽNTYY™ÓĎ{÷îEnn.:C%"""""ňAolIÇ®}z´´›ť’¤µ¨Ýn?Ńë·®SzÔçuYZ,Şţçsą~Ń *~}˙ů÷ŢĘó¶ŃĎmĽç4zŰŠ#$/¸Ü§·oĐöü]łůíź.Ř»\ĐËŻśkľľţÉő~fi˘mż° â|K7 jnűË’_ެ¬śT2ÖŤFĂ…żf˛DDDDDD>čŤ-KńŢűçQqÔŇóÔŇŃjżěŔ+?ŻĂߪÖ9%X˙íW €ÜĺńÎŰź2áěĹüőO«‚ Y""""""Ž÷ŢĚÂżýŞÁĄ ŮÖU5-x,ďrź˛%_[Ú-hąnĆ–µ —JŰ]űőřÁŠ…Č}*Á#ż"•J!•Já'Ď×l6Łşşµµµş_yy9ßl©ŐjŹŰ Ř“¶JĄ’}c§Ń€.}í퉌„\.wşť Y"ňkaŹmŔ“‹Äx*9ŠÁ "ÎD3(H"CŘc°v™©‰ó)˛uťg/ö Ş¦ĹŁöłÉ˛´X´|–ŹŠŁ¶E˝ sÉCřÍ›YČ[ąČiŰ–v3¬VxÔbăÍ÷ĽűĂďáć­ˇzŚŚäXüÇ/ňŃ‘a@^b6›ńŻ˙úŻ0 †ÄÇÇOř> …­g¶X,f§Q'€đá‡HMMEII‰ÓíLČ‘_ ž—„ŘÄ(•R8GÍ QH‚ç%AžyÜ\d Ť×u¶‹‰ Ç[Ň]>mŃÉ˝t9Ţ|ßTô{Ť‡ăŮôD/ÓjµB2V,óů)VZZŠŇŇŇ Ý§¸¸óALČŃ«®®`«Ę,//ç‚QDc`B6Ŕ­_ż—Ż‹Łg{$‘1 DÄ9‚háÔŐ>|Őba08ŢŠÁ`€ŮlěÝ»—ÉX/ĐjµĐjµ“şoyyąĐCö‰'ž¨T*öîť!LČ8ą\Ž0)‚qđp IDATŻE2DÄ9‚h†)•J4 ő řf/AÄńFDP,–{_€%$pń<˘ń0!KDDDDDDDDÄ^} Ř÷b…¬wb\PP0©ű:.fŚÉ,FS Y""""""""z`999¨««Cmm-Ôj52ĹT*•Sâ{˛&›ÔĄ©Äů¦úúz”••á€F®ĹĹĹ‹ĹĐh40 |ó|€Á`€^Żúű’o`…,‘ŹęééŃh$ůřsíčč@aa!ĘĘʰiÓ&äććBˇPxt_Vmz‡FŁ^ŻwZÔ‹f˛Dä×Ď C.ţ%ĹĹĹ qŽ š!w,]6Çáć0¤¦,ÄúőëýöµćÄ—{úřŢÇŰL»Öه…qQ|ăý@aaˇÓϵµµ¨­­őčľLČŇl„,ů÷‡ŃŢ6ôô= qŽ šQÖáÜémC[/ęßťŃD÷?m]V"ľÁ4%Ú»9Ţ&Łŕ·ÇŃÚeƶ5™ŘôÜÄĂą3Ѭ¤ŐjˇŐj'u_ÇdąD"Z­FQQ:C%"""""r<ďŢIÁí7Dčé¤,ÎŁdhľĽ—Ü’Č” ¸ÖeĆOţý~ňď' ~:/­X‚ŐOĄ00>ćôéÓ ‚0›Í¨¬¬„ŐjĺYd^@nµ"lÁČĺr—Ű™ p‡ĆĺćkěB¨b‚ÄüŁODś#fJYYzn cđÖÂĎg@|xĽ?Ľ·żąĐŐ[˙;cJćł3Îű—($‚AńĐłé‰řâ‚IřYwň*t'ŻbˇLőÓ)xMťÁ–4+¨TŞ ·vĐétčččŔęŐ«‘ŕtݡC‡ő’8/‰D˝ř"ć,Yâr;˛®­­ m-Ml§µqŽ š9ö9Č÷Ç[HŇBB¶áď€Bd˙ăJ“Sń‰gŻśö/ňÜ[ůËńš:şSÍĐťĽŠęSWŘŞf÷}|ű>>‡gÓ±ůą%XýT2[PŔR©T^KŻ×ŁŁŁjµZ¸ŻBˇ@II Ŕ`0@©T2¸ÓŚ Yš·o\Ćí—a¸ ŔÖłŹf/‘x>D!…F x~*‚baçÎÄ9bšXGp§ű n˙W+Ç‚bl§É‰"˘mcnžścΓ¸‰e^đ8n·ŰĘß˙ŰőLĘŇDŐ˙hřÚ!»(›g%MBŚ8/­HĂK+Ňp­łşSW±ďăshí2>?źźżŽČp¨źNĆŹÔČL™ĎŔÍÁ€Ý»wĂ`0”J%ŠŠŠś‰gÎśÁęŐ«!‘H´ićř^X,d0!K“vÇŇ…‘«˙‰Ű7.3äś °Ü€őîeűţ!ĎGhĘ÷›ĘqŽ Îś#Ľŕö·­¸Ý®ç#×ůXHČ· źÁóSĽ@ĺÔ+•\…)VbŔÜëÍv¶¤ěő«łşżţ ş8îĐŞŔţ÷Ž̸(l[“‰›·ń‹C_!:2 Ń‘áhí2Ł÷Ö >8~ ż„…2 ›fz˝Ţiń(Ŕ–|-,,ÄŢ˝{‘›› ¨««¨***‚Z­f𼤨¨‹ …B¸Îń,Çëiú0!K?čĂpó á=‘GűŤĺ†Ź"(FŽää ç"ÎS䎥 ĂĆ㬂Ą ±źą#GŘŁ˙Dě˝8–đĚuÖ*• ĺĺĺ ě `B–.ś#s牌ąóGťzĹFGGăé§źf"–<2wî\¨T*¤ĄĄáäÉ“BĹěH›ÖáAVńŤwđżÁóÁ°ń3áďŢw"čęm­ ˛µb…J9×kš5Î^ŽëmUÓŽ‰XŔVŞxŽĂfXtd8Dúú‡póÖ_ľ†ěěl( tj4ńçë®ő€R©D]]ťĐŞŔÎ^±i6›a2™Ř¶ŕ>T* &ußřřxá˛ý1ŻŁiţLÁĐýŚt\pJ´HĄRdee!44”Áˇ feeáÜąshnn`ë9lüŚś#8Gxhčë?;%c“’’śVĘ%šČ{ć™gĐŘŘ(TËŢţć†#˘¸čĐ8D!{ôÜŽ_ŠáćBoYhřZ„†Żm•˛+TŔ˛GX5zú€†żőś{Ä űHôŰB•ě‡>ŁŞO5ăă— ;yŐéú$™ę§ý«Ź¬T*…T*…ݧöíźG,‹Ëmö…Ł“˛“H$0›Íččč`BÖOĹçľÉ&uÉsşôµ·#&2rąÜév&di\·żmuęMÇ>š ™™™‰‰Á™3gřŕ/ě± xr‘O%óh‡sqŽü9bô )))ČĚä)—4yˇˇˇÂĽmOĘŽ´ÔC4'f§XId{lÖ.“"5q^ŔÇ.x^‚UqűŰVŚ´ťĆťî+Âm=}Ŕ˙ţ«íźb°,•ÉY×Óg«†­ż`Eű ‘Űm¦3;ŰĆ›§Î5ßŔ˙Ô5Bwň*zo9ŻiđĚŇl^±/­Hc ĽHˇPŔ`0¸T»Ú+1;::\î3şj–(t8~ÔÔT”””8Ý΄,ŤÉ:2€á‹ź?GGG###ˇ)±páBôöö Up#-őš—4á°Á󒛥RĘ rŽ Î=GXú\ľa2–¦ŠJĄÂĐĐĐ2dřňqĎdB§Z‹B"ě×5Bw˛Ům5¬ú©Ľ¶&™)\Xv¦čőz(•Jčőzčt:čt:H$( ˇ]FŁFŁqŞŽeŇĐsfł›6mrŠź;&“ %%%P«ŐNŐĘä prąaR_‹śĐýnówŕî©VsćĚÁÂ… LňŠąsç"%%E8-y¸ů‚UÎD9G(•J4 ő řfď„î7ŇzZ¸üđĂcţ|h’wĚź?R©===¶1×úĄ­ęsŤ·©$ ‰@HüRˇďíî˸Ýu·żm]W]A+Ť ¬Ç‰„-“´×Ów/ŰÖi…±]4j1®1’±áQ¶–˛TVÂÎ0ÝÉfě×5âóó×]nK’IđVţ“P?•‚q85Ă ]®3›ÍĐëďsŚN$* 3x*))b¨P(źź…BŤF˝^Ź˘˘"X,čt:ttt@§Ó!>>ž yů&dÉý_ç˝ţ#ii<ÍĽëŃG˝·˘úÍvXGXuŔ9‚s„ŰÝ—ťâAäM©©©BBöNwŕ§ Y_dŻśl}ˇďX:qűż®áNo¬–.Ű÷ô _‹Đđµóő ć[í¤ŤfËŔVmÜsó^ňµű¦»Ę×±+aEâůŠ‘#řˇ…Çůmux Ú÷ń9|qÁ9‰·ůą%ŘĽb žMOd€|ڧíâăăˇR©››+ô”Ąńét:!ą]PPŕ6ÉŞT*ˇR©źźŹŇŇRÔŐŐáСCČĎĎgś}˛äÂ:2ëÍ{çN9®ŚHä ˇˇˇŽŽ¶ő¬í´dV!pŽ âasÇŇĺT‘Íť‚Ľ*>>ˇˇˇ†uŕ&îXş$ć TSM…ŕ¨{ Ú‘Ü1wáη­¸ým«ÓßÚŃě•´gGuAn…|ľŇ(+bcl‰G…üŢíţś´ulí`lłýßÝkEOźm7¬řnĐ]˘uü6˘čž—„ yI’ČXŕ’dl[“‰ÍĎ-a5¬ŹÚąs'Ôj5á%öd¬JĄşoĹ«D"Aii)ÔjµPĄĚÖľ Yr=đ3w —ŁŁŁŮŁŽ¦íŕĎžląÝu™ YÎDś#îşÝe.łUMçkmmµÍű˝mLČNQH„íôřyI°˙e˝ým+¬·nŕN˙·¶żż·nŔz÷ wľÝM\ŢżŞcrV9Ş5·cw,sÂl}nÇŇÖ|7t˙DZ'Wí ­·µß7jŁţ+¶á@ä|Idš;A1rîÓ~ćŮôDl[“ 
őÓ)łňő××ףˇˇ2îţóULĆz—˝ďęŐ«=Ú^"‘‹©ŤF&d}˛äâη­NƉ¦Cllě˝}đ»^„s绬#Âĺî 4-ćÎť{oúŽ™!Áó’7 Ţţ¶Ö>XżëĹK¬7ݶ<ŹSĹi{`ÄK˝˘Đ‰eŠŠ‚ĂąXl€x+ů¬~ý===0m_ĐâýÄONź>ÍťÝC9KQ©T˘®®ŽAó1LČ‘_Üů®Ö[7„Dúm‡/RÇkŕëDŃ \^ż(lD‘ó4'fÖő{ťmăíăŃÚež˛ÇK’IđŇ ®ŕM&“I¨ćô”ăÂ_žöˇť­Ěfó„Ţ ň=LČ’Űrvaaa M ÇŞ/ë-Ď+<îô¶ˇ§ča9GçÎDSĆ©BvxŔłżĂ¸Óۆ¶^ "4Aśöľ´ŽUµc5rLÔ:žýŘ’}÷}ßoŽ[•+Ď‚ÇďńiŻfuäđÜYÝĘńfwŕř%—E˝Ä3KőÁ€źüä'“J —% >ţřc.B5Šý ĹC‡yÔ~Ŕd2 Ő±999  aB–\^Ne%M§>¤ăôF#ÎÄ9‚sŃ f⓼`÷îÝSR‘i6›QVV†ŇŇRŐAnn.Ş««ˇ×ë±k×.Ť™´ÖëőĐh40›ÍP(¬8žfäV+Â,€\îÚśť Ůwřđa\nľ†ÁŢ!„*V°y=qŽ šAeeečą5ŚÁ[#<ź!âx#"ĽűĂďáć­±W‰;pü|v đżČs»źţű 4^íf@˝Č`0T* <şŹ˝2¶ĽĽśĽŹÜÜ\¨T*čőzčt:ÔÖÖâ÷ż˙˝S˛U«ŐÂb±ď…X,ĆÎť;Ľiŕ%‘˛_Äś%K\ngB6Ŕµµµˇ­Ą €ç§›ç"ňű‚DÄńFDžËL™?îí_\¸.\~6=qĚí˘#Ůnkş@ĄRMč>Ý~¶Ú»w/ a4a6›]*_G÷â-++cu¬bB–¦„X,†X,öx{&b'F"‘ ˛˛:ťZ­vĚćää@­VłŻŹbB–ŘéÓ§'|¶*µZ µZÍ8ú)&d‰hJ™Ífa*łŮ,\/‘H„ ΄„Šf%&d‰hĘhµÚ1O§€ÚÚZhµZäçç{Ľř&“ “şŻBˇ`ű„,ů­čČđ mź‘Ë yŃ®]» Ó鄟Ĺb±Ó˘R‹fłZ­Řąs'çęęęqÝă)//gż^„,ů-ÇëąćČL™ďv»ľţ!@Ś8ÜŻ^_vv6 :5ÄřřsŐëőB2VˇP ¸¸ŘmPŻ×٬¬ FŁ:ť999ČÍÍĺÎLł˛DDDDDDDä·Ę]}ęŞŰ„ěµÎ>śkî0ńŠÚ™&•J!•JáĎUŁŃT*Ő¸‹L©T*TVV˘°°z˝řĂőŔęŐ«Ç­r5›Í0Ť0™L¨­­…ĹbÁĆŤńꫯ˛]Á4Đ Ż˝1‘‘ËĺN·3!KD~-ě± xr‘O%G1DÄ9‚hId{lÖ.“"5qBÄń6mĆE!I&Ak—ű?nDFr,ÔO§·_ëěÆ_}"üüĚR.$ĺ-$! """"""˘@Ăděô[˝z5[°¤¤&“Éív&“ %%%Bg~~>ç‰jµeee †Źa…,=0µZŤÚÚZÔŐŐˇ¶¶µµµP*•‹ĹP©T0Ť0›ÍÂ`€-‰ËęMšm%"""""""źu®ůúú‡¦ěń˘ć†ůUől}}=0 ăî?_VZZО˛2TWW 8%aíVŻ^ŤŇŇRîä4ë0!KDDDDDDD>ë§˙~_\0MŮă=ł4źţr­ßĽţžžŤF@’<_‰D‚ŇŇRäç磺şFŁ‹m‘E…B•J…Ő«W Í6LČ‘_ł4+1!KDDDDDDDD¬ĽĽÜŁíôz=t:::: T*ńꫯ2x4«0!KDDDDDDDDLĄRyĽ]~~>6nÜĘĘJ Ź“ąD€ËźŇ¬°}űvlßľŤŤŤ~ń|;;;ť~>vě¶oߎŚyꦦ&l߾ǎăNÄ9‚sMË~X^^Žm۶áůçźÇżüËż`űöí¨©©™’1éxPfßÇ›ššĽţş,‹Ë› ö1¨Ńh&ôÚ‰•D"Z­`«5 ÍLČRŔ;věŃÔÔä‰Ý»w٬¬Ěĺ ·±±3atëÖ-466zĺ ’sç˘ŃăfëÖ­¨ŞŞ‚ŐjĹĘ•+‘śśŚ¦¦&”••a۶m°X,“~|ű˝ŹßşuË«Ż«ľľŻĽňŠWƉ} ;v GŹőřµQŕaB–^CCd2˛˛˛PSSó@Óő|ÇŁŃh|ţ5qŽŕA«ľľŤ)))ŘłgöďߏââbĽű¨¨ŔĘ•+ŃÔÔtßJP_ÔÜÜ<-㧲˛’_ŽBU,÷˘Ů† Y hťťťhhh@FF˛˛˛Ŕă ¸‰Môŕm˛{éééčěěÄÁůćqŽŕA3â7żů `ÇŽČČČpşM,ُ¸2™ ă&§:ńéÍ19•ż'%%‹Ĺ/ÖDD43˛łłQTT„M2쵕••aďŢ˝(..ćMł ő˘€fO¬dee!;;‘‘‘ř裏°víZ·Ű755A«Ő §üĆĹš  ďĽóŇÓÓńî»ď:diµZ§äMFF °xńbáşĆĆFlßľożý6Nž<é´}^^ ť¶€óçĎăůçźÇ¦M›°yófaűÍ›7ŁĽĽUUUČĘĘr9&"Îś#Č›ęëëa±X°rĺJÄĹĹŤąťý *22ŇĺţZ­VHÔŠĹbäĺĺá?řÄbń¤žŹ»ÇŰ´i“˶ŤŤŤĐjµB;€ŃŰţô§?Ĺůóç@kź|ň‰0ž?úč#TUU IVűřĎÎÎv;ž+++ŃÔÔ±XŚ}űö Űdee!.. 8zôčó ‘ťT*…T*ED€ľľÜÜ\ľÉptčkoGLd$ärąÓíLČR@«©©L&–ě§$766ş$*šššđłźý V«‹‹CMM Ţyç—ǵX,řŮĎ~†¦¦&¬\ą«V­BSS<źýěgřőŻí”plE2™ Âs«ŞŞBdd$6oŢŚ¸¸8lÚ´ „L&ĂŞU«Ü&SŠŠŠ°mŰ6h4ěŰ·oR°$ě± xr‘O%Gq‡'Îś#8GxYss3Ü7ŮďîöŕŕÁHIIAQQ"##QSS ±±{öě™Đs9vě4 d2™Ëăuvv˘¨¨HضľľďĽó"##…/EŞŞŞpŕŔX,bŐŞUl_xŚN8ŰÇsVVV®\‰ÎÎNTUUáťwŢAQQ‘p_»ĘĘJÍf¤§§°%o«…‹ŠŠ°uëVTVV";;{Üä¶? ’Čöج]&Ejâ<"Ž7"˘Y«ŔAřđC¤¦¦˘¤¤Äyg(PŮŻq¬\ÉËËTUUąlđŕAX,ěŮłk×®Evv6věŘ•+Wşl[UU…¦¦& ¸¸X»v-***`µZˇŐj]î3wî\ěßżk×®ĹÚµk…OÇJ;{Ą›ý˛»ÚĹ‹cÓ¦M<-ů®ŕyIMLaż!âÁ9‚sÄ4°'d'š@´ďŹ2™ {öěÁŞU«„1”••%,vĺ){şL&ĂţýűťoÓ¦MÂb}vZ­‘‘‘¨¨¨ŔÚµk‘‘‘;v ==]¨zuü’cŐŞUÂx;zô(ššš°iÓ&ěرŮŮŮX»v-öďß™L­VëŇšŕľ˝Úr IDAT›oľÁţýűńî»ď:UÎۉĹb\ëQH‚ç%AžüKqĽŃ=LČRŔ˛ŘŮ,öDEJJŠŰľv HOOw©Zs<ŘîäÉ“ŕršˇX,FFF†Ű•ĚGWĎĹbČd˛I˝¶Í›7#%%UUUc®¨NDś#8GĐT›lďŐúúzaß]µmoËqżë566 IÔŃŹ7ştSS:;;Ýn[\\|ßJrűórś+ěc4//‹ĹĺągddÜ·:=;;[HF=z”;Ń,–nýúő¸|ý[=Ű ‰lÖĽn‹Ĺ‚ššĹbÔÔÔ¸@ŮÔě‰{Âbt˘p_dď gď1çČždéěěĽoŃś˘8ú´d"Îś#8Gřľ˘˘"śşÚ‡ŻZ,~ůü/^,ôYť[·nŤąOŰŻ›H˛×^©{ňäÉ1źŹ}¬Ů÷č~¶öß}żqÖŐŐ…ČČH· Vűś0ú –””Ź÷‡@l]ŔńFDDD4>&dś\.Ç@Á×"gŐë¶WŞX,8pŔí6555B˛ĹÝAÚýĚť;Wč çȱ_ś·Šíý$<(TqŽŕÁ9Âw)•J4 ő řfŻ_>{˘Ń]źeGŤŤŤn{±Nµääd·Źźžž>eż×b±x­ł˝uÁ;-F3á>şŘ㍲>úč#Ŕ‡~čö j÷îÝhhh@}}=˛łł… űĘËŁÄF‹ŚŚDżŰS•§ÓćÍ›ŃĐĐ ,üCDś#8G7Ů“°Ž_X¸sěŘ1aŃ<Çv“myŕnڶńčÖ ŃŮى††deeŤ™ŔMIIąoîŚ/{낆†¶. 
"""š%ŘC–NSS“°ňX-öExěIŔVMsţüy—ÓÝ-î“‘‘‹Ĺâ¶7ăÖ­[ńüóĎOŮAçýŘW‘ćâ=Dś#8G·ĹĹĹaĺĘ•čěěDyyąŰmęëë]’±ö mÇ1egŻRźH·=1lď×ěčرcxţůç…ç—‘‘ČČH—ö$öń[^^>îx´!ănŃ1űc:.8Ůq‰ĘĘJîdDDDDł˛pěGăŘegg#22Ňia{ĄĎ¶mŰPSSĆĆFh4·IŚM›6Ţyç§Sź5 :;;‘——7éÓSRRpőęU·‹ Ťu h>DÄ9‚sy[aaˇ°hÜöíŰŃĐĐ€ĆĆF466˘ĽĽďĽó"##±cǧý0==]ئłł‹UUUř裏鲰Ýýökű‚XöqŘZ%hµZDFF:-•——‡ÎÎN§mkjjPUUĺv±ľóçĎ _¨äĺĺ!22Z­V?ěŹŐŘŘ8nu­§ě­ ¦ë‹""ň/őőő(++Ă\®•(0°eśššŹěV­Z…ŞŞ*TUUˇ°°xűí·Q^^ޞ˛2¶S÷ěŮíŰ·;%O/^Ś·ß~Ť»wďvzÜĽĽîíU»ź|ň âââ°gĎěŢ˝eeeÂř·Źç©úÂñu‘ŁžžŤF@ĂAĄ€b±XđöŰo{ÔËmÓ¦MČĘĘrÚ6;;ŮŮŮBőL\\śpyôŠÉömíUA)))XĽx±K•LJJ öěŮă¶zĆ]RfóćÍČĘĘ­[·„ç¶jŐ*dddŚ»jóŽ;Ź·íOúSáŕÎń`É~ŕ6ÖTFFƸżsĽç4ÖA«»Äű%Q<Ů& ?Śö¶ˇ§čáîOś#8GpŽvöýĘÓqeßÇÚ·ÇŤ§űo\\śÇí<ŮvĽ1ů ăŮ“ůg"s”/˛ŕNoÚzPvF#âx#"˘±0!Käp€XUU…"==‘‘‘¸ző*´Z-d2Ů„!"ÎDDDDDD4;E[­[°rąÜĺv&dÜáÇqąů{‡ŞX ±ŚACaa!,‹Đ7Î.==ĹĹĹ“^€‡sç"»˛˛2ôÜĆŕ­„?žĎ€qĽQŠđ’HŮ‹/bÎ’%.·3!ŕÚÚÚĐÖŇŔvZ ŤŻ¸¸………Bď¸ŮzŠ/qŽ Îäö9ăŤf/&d‰Fń÷ţmDÄ9‚|»M&d‰¦ ˛DDDDDDDDDDÓ„ Y"""""""""˘iÂE˝|Tvv6 :5Ä0D Y""""""""%•J!•JÁPůŤ]úÚŰ ą\ît;˛Dä×ÂŰ€'‰ńTrADś#fPD†°Ç6`í2)Rç1 DoDDłV'€đá‡HMMEII‰ÓíLČ‘_ ž—„ŘÄ(•R8GÍ QH‚ç%AžyÜ\„ăŤĆŔE˝¦ +dÜúőëqůú·8z¶ABD1Gô÷÷ŁŁŁ&“ ÝÝÝ|#}H||<ž~úib EEE8uµ_µX "Ž7"""šĄ prąaR_‹d0Čďçţţ~\Ľx­­­|ó|TGG0ĄR‰¦ˇßěe08Ţh–bB–üµk×pćĚ‚ü˛DDäóŃÔÔät]jĘĂP.N€â‘Ȥ;'śšA…%ZČLČ‘O»xń˘S2vND¶nČŲĄ‹"""" xőőőhhhŔ€Ś»˙Čż1!KDD>ëÚµk¸té’đsjĘĂx%˙9Hç‰""""šzzz`4I Q@`B–üÚŕ™C8aÁĹż„˘¸¸ ĂĂĂhll~^˙J~´†!ÎD>ꎥ ĂĆă8܆Ԕ…Xż~=BÄńFDDn0!KDţýa´· =˝@Cp®\ą‚‘‘áç­ů˙Ä ç"fŔťŢ6´őˇA ÇŤ Y""ňIW®\.żřlȤ ůĽr«a @.—»Ü΄l€;|ř0.7_Ă`ďB+$–1(DäósÄŤ7„ę؇bÄXńĚRľYĘĘĘĐsk·Fţx>BÄńFDDD(ŔK"d/ľ9K–¸Ü΄l€kkkC[‹murëđBD~1Gtww —Ąq/ ö9ăŤf/6›!םBrŻB®ŻŻŹˇi1<<,\…„3 ł|Žčíí.+'0čÄ9‚縚! ŃD!Â塡!„¦…c‘óY>G8&ß䉱 :qŽ šfýýý÷ćÄ0 DDDDS Y""ňisć„14»?¬ńĚšŽ YQDBDDD4•źńrŮ)ć% —M&BÓ±g(+q8GqŽ¸Ç±*Ý©RČ‹ś˛as"""˘©<®fČe§U‰ăxę0‘·8&ö‚e© ç"ÎÂëU—ÓDÓ5ć‚bä ŃTW34š($˘čn?yĂđđ°Ói¸<đăAÄ9ÂáĂšX&,dÖßߏ›7or§ ŻęččŔČČí‡đ(‰e Ń ĘÎÎFQQ6Čd8ă3>C@î„Ä)…Ë—.]b@Č«ľţúká˛(zÓéąÄ9‚s{Ż*řâĹ‹Ü)Č«®\ą"\ž˙BD4äR)”J%f8ü€VWÚŰŃÖÖćzLÍ‘;Á˙FšOŔ:2ţţ~\»v .d`hĘő÷÷ŁąąYř94ĺ{şŘcđä"1žJć‚#ś#sDŕÎ!IOŕö7ŘŞoܸůóçsˇ)wăĆ §ÖˇIË=ľoD†°Ç6`í2)Rç1D^ÄńFDäŰ:€?Djj*JJJśçq†Ü…D xJřą±±‘}"É+ďíwŃ ě°`”'‚ç%!61JĄ’ÁäAś#vŽËüđRŽ9ňŞááa§1üđR"<˙2CŕyI'?ąśí‡ĽţYĚĎÇ[j꽳?®ôÜńąçwý^·$ÎiD4őźďKHŇ@¸íCřČČ>˙üsüŃ”:wî:::„ź'ZůFś#sÄlâřúűúúśgDSAŻ× ýšE!áU<Ç ‘×8&9“źľŕżľî¶Ň~衇0wî\ľaD4Ą pëׯÇúW^CŘcśVF÷„($aŹľŔ?ňŠk×®9ť†˛({•o4ósDxć:ÎÄ9bŠŠŠđŹk°Ç6Lřľ˘(„¦Ýű»ÜÚÚĘ1GSFŻ×;}’şÂďű5?Čx#"Ť.b¸Ťď†­>óÜ*ĎŽ—YKDŢŔ„l€“Ëĺ'?‚ŕyI“úPO‰ „9Â!AÔ××Ç9‚8GxQb%‚b~¶ŇßßĎŕЄܼyźţąS2–_’Ńt™;w.~ôŁ ?_é±b×ń|zů®ôX§59{ĄÇŠş«wđnÝ0ę®Ţëg»zőjVČ‘wŽŁňčŕďŃ0‡aăqá ůĉŤŤ…JĄbO×đđ0šššpńâE§ë^ŠPĹs~J$ŮD#sçqŽ ÎÓ$ăĎgÎś9Řşu+–-[ć31ŞŻŻGCĂ˙eďţŁ˘ĽďĽ˙ż0*?TftMÂĘŻl1‘Üişë´)»­MµŃî‰f›bŹ5ßfµâŹď÷îŢ{zÔó='Ůű|!-§˝SĎ©¸Ů$­9ŤZmŚą7óc¸Ó­ &]tHt`FŁóýg2Ă 0 030ĎÇ9ášëóą®ë}]3 /?óąŞŐ/éž[_&ůßĎ”ŁqŰülMűĘF]˙ä=Ϩi`¤Ĺůóç}>f gHÓŚ%_c>7^#†ĺrąGA'©Ă‡ó®7r·/ŐmsÉY\®®/ţíîîşM›źĄ™9ßP\B2Ĺ1fłY?ýéOU[[+›Í¦††µ¶¶ŞŻŻ/,ŰĎÎΖŮlÖüůó•źź/“ÉUő±Űí:{ö¬$‰wHŔyO 0ę?–’5óKɵäkşvößärŘ<Łá€!Ż›Ů)!Ëüěqí×ůŻę˝†éúĎ·fhçÎťš×đsŻq ÉJ¸ďűşŃů}nűłn^n䂦ÍĎŇtó—Çő??n^i×őłoë·źĚTö’t­_żžBdŞ>ßňóó•źźĎT¦<YŚéŔř{ľ'Işqůśnt´čfOűŔW/ŔÄňµ1;Eş-^ÓŤš6w‘nKÉš°9 o:l˛;$;eç5ĽFÄřkÄmsé¶ą‹äúĽ_76Ýh?§›}Iň=‹|ÎŇ$IÓŤş-5[ÓŚć yÎą®\{6‡”0{‰çLn˛ź?çgŹű¨&±űŃú¤I,ńÁ˙‡"DPÜô~/” ÉěrifZšĚfłßă˛SÜoű[ťű¤ENÇ5ÍČYˇiłS) €}ŤKţrŔĺéw(cŃíZłŞPţ~•ŚĆ9žÇ zJ§Ţ;-W÷ź'tߪŢ=­˘ď<ĄÝ˙¸Y{ţqsX·Ťđ)--•ýęu9Ż~®ř˙ö÷ŕů¦ ’ž‹SęşuJĽë.żÇ d§8›Í&[óŔMt\×ű)^#$|í>że56¨ęÝÓŞxů¸jŢ{™‹Â}C<ß@ě"ÄśŞ7^ô[ćpô¨đ;O©öĂUĽ|LĹŹ­rÝHî'`rcöo$ŤssĎV˝{ZqÉ_VŮ/^Ń⻿«Â‡žRáCOiń=ßŐŢgĺ×ŢáčŃĆíŐÜEEžuç.* ¸n°Ű>rĽJEßyĘłýĹwWEßy*`Řşý'űü¶˝ý'ű8Ů!LYCn\¨óąkůô Ëm>o¶Žî‚˘OúŚâ>1y^#\ýÝ©AŐ­Ń«ů÷䎸îöźěÓ†ďŻŇá—˙?IŇžgĄ=ĎţJĂl•<ý}ĎzE«ţ/Ő~Ř m?ú{­YU(I*űĹ+ÚóěŻä’<ŁrGcăÓ{µliŽżňśŚ†9*űĹ+:ú‡S*ůI©*ţ×ĎzkżżKGŽWéáďxö©âĺc*űĹ+rtőčŔ/wOH_ĽFL­:Ń'}÷{dR<ßâ“5ýöĄĂ®ďęďÖŤ‹Ł8ú¤Ďp÷ĹĎ7@ď­(A …-ëtc”o¬Żň޸żY§OúŚTź<Ż.—Kqqqv¬§ pt]Ń‘ăUŞxůŘ@ úŁ‘ďĆ˝ěîźđóČ«ĄĘXşZ{˙yżOřYűav˙ăfźŕµđűTřĐSÚűěŻTňŁż—Ń8gTűoHž­#Ż<çiWřŔ}2š uôŤSžuŞŢ=í 
cŹĽZęłm÷ľíţÉ•‘~Ǹ×wđuÁkÄÔŞ}ŇçpżG&Ăó-Î6bŕułĎ1ŞŃ'}†»Ďh~ľ‚xoE ¦¶ůóçëÜąs+[ź9bű§ţ8şíŃ'}FŞĎyóćń„ŹŃ×P>ôTŔĺé‹n÷ :‡Sň´h[üŘjí}öWŞz÷´ ¸Ď3íšďř­»fUN˝wZUďťöŚś ÖšďúícţÝą>Ał{ŰŢŁu˝÷óŕ+ÇuäUoĽF|!--M­­­ĽćÓç”ď3)))âĎ·yó橣ŁĂoyVj˘vŤp< źk_MđۢOúŚdźfł™7µSśĹbQNNŽÚöí“‘rSě·nÝ:ĺääČn·‡Ô~ŐŞUăľOôIźăÝgRR’ňóóyÂOˇ×«ŐđŹčń˛Űk´ŞŃ0[ůwçüÄTn‹î°ěvI’Ł«çÖżW$I÷~í±!ű©ýčě¨Ů`ăÚŹÎJ’ŠľóÔë¸÷/žď±ň±sçNŐÖÖň{™>§tź&“)*žs»víRmm­z{{}–›L¦ŰÎź?T5˘OúŚTźŃň|ĂÄ2™L2™LJ Ŕ¤Ń/©]Rwk«Śłfůýçě—””$‹ĹrűŐ«WŹű>Ń'}FsźĽFDÇymhhĐ@6”y[ábŰ=Ěö żvß„žămĂL‰0QŰĺş•×~/Ó'}†7ŔX±bEČmÇűxč“>ŁąOŔřj“ô˛$˝öš˛łłµk×.źÇ dA đµąĺ‚¤/FĘ ł%IĹß_ĺ7W«Ă1Đ~´óÇ+˙îťş5‚{ŢXďm;şz&dţXŔđ¦QFď…_ľęółĂŃŁŻWú˘Ű=S¸§"(űĹ«~í7>˝Ws©ęÝÓ˛îvď?˙Ęﱲ_ľŞĹwW/ăD@1B€T˝{Z´WŰ~ô¨şşŻ¨ä'űäpôčŔ/v{ÖYóťB•ÝýŞ^ř嫊‹‹ÓšU7÷Şxů¸ŽŻRÁ×îó˝:^Ö¬*TÁ×îSŐ»§µöű»TňôßË<[G˙pJ{źý•–ÝťŁâÇř¸#„,!xţźwhĎłżňŚ25fëŔ/wűÜ ËhśŁŞ?Ľ¨5ßߥ˛_Ľ˘˛_ĽâylĂ÷W©ěźwNč>yĺ9•ü¤T_€Ý ľvźŽĽň'"€@3\Ýu›Ş7^ ¸<˙î\9lUž)ňďÎ 8¬Ń8GUoĽ(‡ŁGµťrÝÂîóŰżÁŰ´Îpűi4ÎQĹ˙ÚŁ˛ŢéŮvƢۙ;aŐŰŰ«?ţńŹĘÎÎVnn.@Ě#` ‚ťrŔhś3aÓDó¶Űz{{UZZŞÖÖVI҆ d±X( b,0J6›Możý¶ěv{Č}$%%éÁd¤`ĘĆJŇÁ%‰P€Q°Z­Ş®®Vż¤{n}ÜdQ:věÎś93ć~._ľ¬źţô§0ĺ cÝ&c(ŰÝÝ­äädN, "ěv»Îž~jĺ¦Y „?2ÇC ?RDżáćq8Ś˝ăţ9ęhěSçç’&W({ńâEuwwëý÷ßW[[›Š‹‹9ÉY` âçŽîiÔôV'…LIÂŘ´Ż&knf˘Ś™ úädç¤ e+**Ô××§+VH’úűűU]]-I„˛Y` âçN×ě…3) ć ĆJŇm3§iÉ·ćNŠP¶˘˘BŐŐŐZąrĄgY^^ž$éÍ7ß”D( €ˇ%H2»\š™–&łŮě÷8,Ćd¤0Öm2„˛Ţa¬;„u#”@0Hz".N©ëÖ)ń®»üźF‰Ş`ĂX7w(›ŕ5ĺĎÁeµZ#~,Ă…±nyyyZąrĄjjjd·Űą0jŚ@HFĆşEăHŮŢŢ^}ňÉ'ƱnyyyĘĚĚÔőë׹0j˛µPĂX·h e»»»őř㏽~BB‚ş»»%I .ä‚@вŁ2Ö0Ö-Z¦/¸xń˘'\­îîn]ĽxQ \ ,‚6^a¬[$CŮăÇŹ«ąą9ä0Ö­µµUűöíSEEFD € Śwë‰P¶˘˘BÇŽÓÇ<ćľ’““µrĺJUWWĘ`DĚ! €MTëÎ9e+**T]]­•+W*++k\útßěÍ7ß”$sŃĆ…ĹbQNNŽÚöí“‘rS,†5Ńa¬[8BYď0Ö˘ŽďP611Qëׯç⌙Éd’ÉdRĄ&Ť~Ií’ş[[eś5KfłŮçqY )P;mFś:űÔŮŘçY–0o†î¸Ψűżrńšě˙Ů«×nz-uů¬3^ˇ¬Őjť°0ÖÍÝďâĹ‹ąxbT›¤—%éµ×”ťť­]»vůdŔśýä‚çűÜÜ\ 2ąo&ČxŽŠžčëg¨›lőőőÉfłŤ©ołŮě77Č4JBőö»u:{ţ‹@6??ź˘Ŕ0d@HěťWtěäź=?/_ľ|ČQ”ŔTeµZURRB!4¦,!©řMĄúúŻIřŘúęŐ«) &Ąňňreee)//Źb˘ŽŐjUuuµú%Ýsë ŔäF FĹö™]ŻVŞőB‡gŮşuë‹I«±±Q)))•ěv»Îž=+IZD9€)@íř˙ţźi $éÁ”Ĺbˇ8Y@Tký¬CqŠŁŇŰçTë…54~*{ÇŮ;{|_µjSŔ(ȢڡŁVŠ…U\\¬üü|Š^$™].ÍLK“Ůlö{ś@-11Q+V¬ĐŠŃÓ IDAT+”””DAó,‹ÖŻ_O!ŕ±@ŇqqJ]·N‰wÝĺ÷8, ę¬^˝ZÇŽŁQ"))IfłY999ĘÍÍĄ r¶lŮ"§ÓI!˛€¨“››Kđ l˛˛˛dłŮ(Âb%€đ BdłŮtňäI € Č ¦566Ş»»;¤¶6›M'Nś ,bZyyąęęę(‚›zed·Űe±X|–[­Vuttř­ż|ůr™L& 0Y,ĺää¨mß>)0%Čp‹Ýn×Űoż=¦»­›L&-_ľ\ąąą!÷±oß>-Z´HfłŮgyeeĄţň—żř­o4•žž.IjkkÓŰoż­ď˙ű~í0ůL&™L&%P `Ňč—Ô.©»µUĆYłüţ6#ŕ–cÇŽ©şşzĚýś={VĎ<óĚú¸óÎ;Ő××çłě‘Gr}÷ş]]]úä“OÔŰŰË €h“ô˛$˝öš˛łłµk×.źÇ d¸ĺňĺËăŇŹÝn§@Ś0›ÍJNN¦,Üq˙ĹĎݯɦ·:)0 effĘ`0„ÔÖl6űÍ÷ ‡@€âçN×ě…3)¶nÝ:¦ąŁ€Ń  ĘlٲEN§“BŔ4Ť]˛˛˛Bž“2%%E›6mň»‹' :Č0…$$$hÉ’%JJJ˘@X­V•””PŤ) ÓĘËË•••ĄĽĽ<Š:V«UŐŐŐę—tĎ­/“#d2ŤŤŤęîî¦@źs]]]•ěv»Îž=«żHrP`J  Ę”——«®®.¤¶ýýýjjjRoo/…€(D ŔréŇ%íßż_6›Ťb@bY' ’Ě.—f¦ĄÉl6ű=N „Čb±hýúő $=§Ôuë”x×]~ŹČ ¦mٲEN§“B ,C€(“™™)Á@!€0ÉĘĘRrr2…@X0B€(łuëÖoĘŻŚŚ %%%QHBŚ` IMMŐćÍ›N`üŮl6ťSOOŹçgţB?˛D™­[·*///¤¶ńńńĘČČPRR…ŔÝăő}}}˝š››Łbżš››U__p?'“’’].IROOŹŞ««ŁbżśN§Nť:ĺůůË·öă@€)$55U›7o8q<€ńgłŮtňäI ˛ŇĺĆ˝őÖ[˛Z­Ý'«ŐŞ·ŢzËóó—].ꑦ“I‚¤Ő^Óś={VÇŽÓ•+W"¶OÍÍÍzőŐW=Łc“].}Ýk1vÜÔ 1­±±QN§3¤i l6›Ş««µfÍ ˛ľ§ł.—şo…ruuuŞ««Óś9s4{öě°íÇ•+W|>B/MŤ°Đz˙ůÖq\¸pAŻĽňŠfÎś)“ÉÖ}ąpá‚߲Őqq>7ĂŘČ ¦•——kůňĺ˛X,€$mŠ‹Ó1—KçĽÂĎžžż€4ś˛]®)~3.Ns%˝#É=‹ěµkפá’|«ľé<Ć,Q¦ĽĽ\YYY!Ď# €©Ăb±(''GműöÉÁýHôH\śÎJúŔĺ’-‚ŁRÍ.—ţ:.N9Sěcô÷KĘ‘ô–ËĄ6É3"9ÜR%-ş5ň‘±@€(ÓŘب”””Ú¶··ëő×_×cŹ=Ć<˛S€Éd’ÉdŠš`,Gňˇ]’aܶQ’A’¦đ|¦ ß’Ô/©-ĚŰ÷چeÎŘ1é—Ô.©»µUĆYłüţ6#` q:ťjjjRoo/ĹŔ„2ÜúÂÄH.`’j“ô˛$˝öš˛łłµk×.źÇ d€™Íćn@,rʍ%dD¬#@LËĚĚ”ÁÚřłŮĚÍŔ‡’Žßúţî[7‹b,bÚÖ­[ełŮ(Ä;Ś•¤Źââ$BYÄ0Y˘Ě–-[ät:)||(©{ëß­‘çxťč>Ű$5X磸8µ»\ĘŤ‹‹Šý¤OúĎ>;]®aoŚF @”ÉĘĘ y´^JJŠ6mÚäwOL~g\.ŮF1ŞÔ¬‘¤pö™0wşçMWçů~IR[\ś.ş\2ÇĹEŐ~Ň'}ŽąĎú'` IHHĐťwŢ©¤¤$Š„ŐjUuuµ^|ńEŠPé/ľ¨„瞓Ν şÍ‚;”ž›;üűÇ0ő™0wş–|k®n›9M’<ˇl\\śjľô%}}۶¨ŘOú¤Ďńî3Đ`YÄ´ňňreee)//Źb˘šĹbQîÁ·ůóçG¤ĎÔÔTťó §‡±i_oče?ţřcUTT¨¸¸xŇ;}ҧ7“ɤüü|żĺ˛D™ĆĆF9ťN%''S LĎą”” z‹%ęű´Z­z˙ý÷=?c݇˛ŐŐŐ’4d(;Žť>é3XÓx9 ş”——«®®.¤¶ýýýjjjRoo/…@XY­VÖĐĐ0nlnn®VŻ^MÁ1eČ!2›Í!Ý ±‹@1-33S!¤¶fł™›€°˛X,jkkŁ“Ř4J€X¶uëVn˘& ‹Ĺ2¦÷./˝ô’¬V+…Ś 
FČe¶lŮ"§ÓI!Ś»––Ůív AŚ Ędee…<'eJJŠ6mÚÄ]S JČ0…$$$hÉ’%JJJ˘@X­V•””PŤ@1­ĽĽ\őőőL Ď=÷ś~ó›ßPIŚ@€(ÓŘبîîn „ń9×ŐŐE!˛D™ňňrŐŐŐ…Ô¶żż_MMMęííĄü<ńIJX,"‚dB.]ş¤ýű÷ËfłQ ~ŇÓÓe2™(DČ@L§@h,‹ÖŻ_O!4YÄ´-[¶ČétR0)ěÚµKgĎž ą}KK‹’’’¶ ‚˛€(“™™)Á@!€0ÉĘĘRrr2…1ᥗ^’ŐjĄÄY˘ĚÖ­[Cľ)W||Ľ222”””D! 1B€)$55U›7o–Ůl¦@Řl6ť˘“,Q¦ĽĽś˙ń0!-Z$“ÉD!"@€(ÓŘب®®®Ú¶··k˙ţý!ß ŔÔöä“OĘb±P"` q:ťjjjRoo/Ĺ€(4ťˇ1›ÍJNN¦,bZff¦ CHmÍf3ůae±XÔÖÖF!&1¦,@LŰşu«ňňň(,ËŢ»ĽôŇK˛Z­2‚bb„looŻŞ««Ő××7ˇŰéččĐńăÇ>–––¦üü|®8Ŕ¶lŮ"§ÓI!Ś»––Ůív A1Č–––޵µu·c·Űuěر!ß°aiŚ(++K6›-¤¶)))Ú´i“Ěf3…€(S{§é„ąŁĎ§o›| 'z„. Z˛d‰’’’(V«U%%%A‹‰˛O?ý´JKK}Ń9ińJśç{řó˛GÝwâĽéJűj˛®őÜđY~ů?{uóşËóóňĺ˵bĹ ®8€(S^^®¬¬,ć‘“ÂsĎ=§ŢŢ^=účŁc’Љ˛fłY;wîTbâëŐ¶kJ^” ůł=_3fÝR˙s3}úąvő†_[\\ĚŐJccŁş»»)Ćç\WW…@XL‹•ĘŢĽîŇ'';Ô×ńů¸nÇö~—çű=?ĆF«ĽĽ\uuu!µíďďWSSSĐÓő-O<ń÷8аi±t°ĘĆ"íŇĄKÚżČ70µĄ§§Ëd2QškľuëÖářřxedd())‰Qh%ýô„±0őóI‰ÁfĚŠüŻŐÔÔTmŢĽYfł™“„ÍfÓÉ“')‚F {K°ˇ,a,L^«WŻÖňĺË}ÎSę=ł|ľ˛V™tŰĚŃ˙ŠĚ\9OwÜ?Ç§Ż„ąľÁnZZż7€(ÓŘبîîîÚÚl6ť8q‚"€°±Z­ŞŻŻ§“ŘtJđ…‘¦/ ŚŤv»}Č ®‡š{2gĎž ¸Üd2 ;ż$€Đą_·«««% üܵ«7dţęŘçlť1ë6™ţę‹©:Ď÷©żó‹˙ŘKKKÓÎť;™N2ĺĺĺZľ|ą, ĹQĎjµŞ··Wyyyc’"d¨Pvö™ę¶9=ëĆNí¶†e[ĄĄĄC>ĆŰŔÄĘş˙łm>ÝŐÚľ´··k˙ţýÜ1@@O>ů$ź Š0Ů! eÝc§ľŐ«Wűť÷ł¦))u†ĎWę=ł4cÖčŮ´Ż&űő5řF@‰‰‰Zż~='`ĘŽ5Ś+§Ó©¦¦&őöör‚ 1eÁ0O_@›ç]’n\s)˝(9¤»®673Qs3ż|ű:>×'';ËkkkUTT$Iެ¬ô9?Ń,ŘýĚČČPqq±¶mŰ6áaóhCYÂŘÉĄżsŕ|».¦ľÁˇěD‡±ŇŔh˝ĺË—Ëb±ŚţîďWSS“± ŔĎO<ˇżú«ż˘4=ٸqŁjjj"z 7nTEE…*++ýŮá”””xÂŘôôtOŰŃô.µµµZ»v­š››ýŮ©®ąąY{öěQEE…jjj˘&”%Śť|n^wůn€äĘJŃ;MÁĄK—tčĐ!íرCąąąś<„Ő…ďÖm3™ šĄ§§Ëd2Q K [[[«={öhĎž=;ĐŠŠŠ!sŹŢ 4Š÷čŃŁž‹µ¶¶Ö'č®]$9rÄgZoFŁŃ3 Äd™®Ŕ۲eËTVV6äőuäČť:uJÍÍÍZ»v­*++'|źF e c'ŹÜÜ\%&&ŞŻŻ/¤ö‰‰‰_1Ŕ;”eÎXŕ ŢĎţÎ’nPbÜÍë7uµíÚ¨Ú\»Ę{ÄŽéáÚĐŢ˝{µfÍš¨śku¨ OŔý‡řŕ s¸vŃ&???j¦U…Ńhrú‚ÂÂB•””¨°°P§NťRUU•jkkĂr­ ĘŢń•9úěO=„±“HYY™BjK;Ü7ú2›Í„±€×{ ÷<őc1oŢĽIyü‹EëׯçBŔKç }r˛“BCđ@¶  Ŕsc¦hşS—{.`i`ľŕp…˙BYﻯĆN«ŕ:FĎb±(77W—/_ž´Ď­-[¶Čétr2ń°Ŕŕ‡áíÚµKgĎž ą}KK‹’’’¶ ‚&<-,,T~~ľ^xá…q™ş ąąŮÓ—{ ŚŚ ­YłF6lđĹęľIŰÁ=ˇť{žŐŠŠ µ´´(==]ĹĹĹjnnÖÁ}Úť:uĘÓWAA ýÚrôčQUTTČápxFmfddh÷îÝCÎCëp8tđŕAUUUÉápřÜŚ,??_۶mói[UUĄS§NyŽËű¸Ýűę}L6l¸íŃÖÖ{Ű޵ۻwŻš››UUUĺ9˙÷y" 7{?˝Ď{ ëL}ÜÇçĘşĆEff¦ …Ŕ¤a2™&őú¬¬,Ůl6N$c`6›µ|ůň1˙'mRRRT~şz*y饗´jŐ*­^˝šbDHśËĺrMHÇqq’ÂŻ’’ĺç童ĄE’TSSđɵgĎO h·öîÝ;lk4UYYéÓ·{?qoĂýQ÷‚‚UUU©ŞŞJEEEC¶Ű˝{·öěŮă×ΛĂáĐÚµk‡ť&ŕůçźWII‰Ď˛ŠŠ mßľÝ3UÂPÇyŕŔ­YłĆŻnCí«÷1UVVú…Ť#m×h4ęđáĂ~íÜŰ.((Pqq±6nÜ8ä~>|سϣ˝ŽŐx°ŠŠ Ďö_c#][®[ďk­ŞŞJk×®öĽ8p@+V¬đ„˛„±BŐ××r8ÔŢŢ®wß}WŹ?ţ8Ż?@žsî?@)$›±Ś---%Ť°°Ü×h4úÜTk¸Đn(Ţ#kÓÓÓuŕŔUVVŞ˛˛R»wď–Á`ĂáPQQ‘jkk=íÜë¸=˙üó~ËËĎĎ÷[gÆ žeCŤ†őVTTä ˝Ű>|XË–-“$mßľ]GŽń´©­­ŐĆŤĺp8üޱ˛˛R6l4öz×°¸¸Řçqďăf_ÝA¦{»Ţ5\ۡBŃ3gÎhăĆŤ2 Ú¶m›>ě·OîmL„ŞŞ*mßľ]Ň@x;ž˙›ćׇ–-[ćs^8 ôôtĎńť?^;wîÔŞU«cDDjjŞ6oŢĚë&6›M'Ož¤ZŘnęUXX¨m۶…4uűcđ’´lŮ2UUUů|<˝°°PkÖ¬Qaaˇ'úBLba dCťşŔ}3¦eË– {c(ď0lĽGI«ąąŮž®YłF.—KÍÍÍžc:räjkk‡ <'‚»NéééĂŽŞőU‡ »‡j?7ó2 *((řĺľńŤ{Z…P¦ÄŽ÷ôEEE:zô¨_H[\\ě™+ĆŞĽĽ\őőőŔ¸[´hѤľ!ëTpĘ‚·ß~;čŃ«#©®®Vii©Ď˛x@ďľű®jkkő­o}Kßüć7=ëşy·qďKrr˛__Íť;Wťťť:~üxŔŹŤ:tH§OźöYÖÚÚęůw¨ţG vçĎź÷<ŢŢŢ>âţçüůóęëëÓgź}¦ÎÎN}úé§úěłĎ†<–ˇę7xżĽŰýéO¸¦Oq_333uţüy˝÷Ţ{žu˝·ů»ßý.¤ë!óçĎr˛éŐ«W«ŁŁCGŹU}}˝***TWW§G}4¨ÚłźwÜq‡>űě3ŐÖÖz‚öĚĚL-]şTK–,ŃťwŢÉ« €qÓŘب”””Ú¶··«˘˘BłgĎV||<ĹÂd,ďůFĂfłŤ)P-**RuuµOVËňóóµbĹŠ°nÓ/µŰí:tčиmŔn·űÝůmÉ’%Ş©©Ń•+WôÖ[oićĚŕ*Ş‘ IDAT™şăŽ;d·Ű=ëş[\ooďw‘KHHv]›Í¦7nřő;R˙Ž#P;ďŔ4жFňŮgź©®®nČŹöĎś9S×®] Ř˙pőjżúúú$ Ś^©¶Žw¤sLĚą˙ęWżŞ‹/Ęn·ëĎţł˛łł5gÎśqŮĎżýŰżUuuµĎ˛óçĎ{‚î9sćčľűîSNNŻd"ĘétĘn·űĽîxcąÓ1Ŕh¸\®1µďëëă˝Ë ÷q999a˝ß”—/_žđŤĆÇÇű|ĽýÔ©Sr:ťăŇ·;¬śŚtüřqO;sćLÝ~űíZşt©–/_®ď}ď{žŃÄă­§§gJ<‰Ľ§GĎ0Â}ÍnذAJOO×Ě™3}ęWUU%«ŐĘ+1Äl6‡t30D÷ Äp™>Ü s§ëöűç„ÖóŻţ1f&hń7çú=ĽXsŐ麨÷˙đgőôô¨ńr˝Ś™ Ň­Oŕű´ąŐ—kŽ3`_Ţěżá–ü·;öqűýs´8Ď·Ź„÷§K¤„yÓýűć8µsŐ÷HÇoµůŇ -ţĘĐűűYS›îXĽ@’Ôwµ_˙ňň˙č7)^Oü÷ďiIŢ"ż6źÔ˙ĹÓ˙ŕc9ď˘~öË»ťńp˛—şu}Fßµí?2pqzďpŰÍő0R»€ç&ĐȸČ3Ă ăŐQíçgMmžď‡ŰĎ»´PŇßxÚüGUťNW~¤ţ^§ęęęôĐÓ_ÓÜTŻfBÖôVçűXřĺŮJś7bA¸tşß3'ýh™ÍfÝőŤ4]şrB€°čůĚ©»Óîą}ÂÜéŁËf¦¨ ˙ŢŁţÎĎ#˛íaŮi3â4{áĚ1m`ćěۆěă»[Šôź§Ď©ăb—Ţ˙Ăź•}oşç1ď6Y÷¦«±¦EÍ ­ĂîOëą‹žďdÎ ¸nâĽé~Ëo›9ÍóďPý:Ž@íîY%íľµ?MźęŻż»4`ö ýü˙®$}ďÇß”iˇAýWF 
˙]É·tĎŠ¬€íz˙ăęÇŰň©Ďz3gß°~’”xazŔä~9CśřP˙rIÓfÝTŇś„!÷·ł˝K’”ţĄŰ=í‡Űćh݇á wn|®¦O=ßç=°dTűŮöjŔýWÓp˝żţö@ÖqÁˇ˙óĆ™!ű;qŕĎ÷÷<pťˇ¶1ž˛nËľwV˝=ý×ůŕÄź}µ_ěňülş}č:qfś÷5C’Ô×ÓďłOUúŕ‹ó1(”Ś­ç.ę7?ôü|gÖÂ!× xNĽÚz{ă×§ôĆŻßńąľü®ĺŮÜ8ŔřYöť›Ŕx»ři›®őÜ 4-Ň;}o† ůʰëÜó@®ć-)_˙ů[:WÓě·Îż>ó{}řN$鯿}ŹL·öőicŰ„ÓC?řş¤ůĺgďĘž«iÖ‰_„{Y÷¦Ët»QiŮ <ŹW˝ö'ż>{{úőŻĎü>č@y¨Đq°żyh™îĚŘöë?{+`ŕ]őÚž}şű\Ąe/ űuŇĄ_çjZ†üúÝĎŢŇĎ~ü’únŐúŰ?řşĎh_ď€ţĺgüĎɉ_źň\?y˙‡ŔżŢ;řܸĂÚÄŮńCÁ,ăíłCž“2%%E_űî}Jś7ťB@ŠŠżÖľý}řn:ĽF‰zKš“ >»N?Űú/ęëé×϶ľ¤´ěşç\ő^q´˝ŕ$Ý™µ@ßűń·üú·Đ Ž‹]úđťmýÚ˙+Iúů{?ťăq‡ĚUŻýIľÓ s5?×=ä*qv‚kšŐzn NśŻÇ˙Çw=mÜS3|řN~¶ő_ţOߨí§^~ć÷:őÚž‘łkëŢßpk=צźmý— ÖýëoßăwĚiŮ =őm=צ=ë~®´[Atkc›úzúugÖ%ÎIPă ĐűoZ¦NśQcM‹>xăŚÎŐ´(-{Ҳč\M‹§˝$}ďÇßrÚ‡„„Íżcžşz[)V«UŐŐgt÷“ (‚2-vbđÔ¤e/ÔŹţ¤g:€Ösmză×ď¨ęĐę¸ŕPâěx>ňý¤bsŔ@ěď~üM%úXy°ŁHCńwŰľĄ>ógÇLđĆUúŔĆfÝ›®˙~`łĎHŢ>łÎ3Ző\M‹Ţřő;žc”ËĄo˙ŕëÚöó'żKß;ëłÍżyh™§>nÁڍMË^¨=ŻýXwßEÚz®MU‡>đÔV’ ůŠ~üó'Ł:lĽű\=ö?ľ«Ç˙éဏ˙đ™užcěëůbÄm_Ożî~ W?ţů“CöýĂgÖ鯿}ʤ‘˛ľÓ 7~ýާýĽ…ýđ™GüćŃŻöŤŞŻŻ§`R8˙f‡~ó›ßPI,ÎĺrąĽ444hßľ}’¤¤ÔĘ\9/¤ŽÝÓ Ě[hrú€ÁZĎ]Tß•‘†Ů·BÇ@ěúđÝŻuÓugÖÂĂÂŢž~}ÚxQ}=ýJś“ŕiăŢnâ쿏ăwõóöá;˙ĄÖF÷¨Řeß›>ěú­ç.}^Çç]ű‡:.şG­úwëą‹!őśĎľąŹ}¨6Ţ}ź«ińôź–µ@Y÷f »ľ{ÝáÎY(×w»ás~g'čžr=űä>§Cí§ý‚Cźž»č9źÎŚŐ4űl}ĺŽCž¶ŕüŐuľ÷C éŁiÓňĺËe±XFÝv`„l5#d@ŘśłCsg¤ęŃG ©}ii©RůłcľŽ˝í×%I;věPnnnض=a,ÍX¡ţţ~ť±} Ž¤Ý6sĹ&ř9G Âm¬lý'ŞyZŤfĚş-ćë©@–żÔB.]ş¤÷~˙gőu|N1řą=mȦ±‘F a2ťˇ±X,š™×«Žk)‚B €¶ě;™ZzÇR &…Ě•óôÍ”GCnńÓ6]Ź»Á´Ä”DĂÂY2 ăíł•śśL!@L8ńú˙VÇą> AŚ ĘäŻĘRž!/¤¶ńńń2ÝnÔm3ů?WFüµŔ’ššŞľ_‰óř?W l6›ZjÚ(‚F €ć¸pEÝÝÝ!µµŮlj>Í ˝@řtžďS}}=…ÄdÓÎüáĽęęę(:Îő飏>˘“ذźgěďü\źśě J„Qă•.%>p»ňňB›G¶ó|ź>ůßß@8ńž„Kçu)5ôö)©)ęjîĐŐ¶k1^ÇĎ#¶íaŮ›×]şÚvť+€°ş®®®®Z¶··«ćuµ›ßß@8ńž„‹ËĺSű˘Â":tHÎî3Bü¦,ČÍÍUbb"•`r:ť!Ď… ±&11Qfł9¬Ű 8B¶¬¬L ś"`ßľ}cîă‘G ű› źsîçŮňĺË)$‹ßţö·cjź––¦;vPČ[rssĂľÍéŃ´3`|Íf~—AĘĚĚ”Á`ůąf±Xř„›o|ăjkk ą}RR7"l:%@,Űşu«l6…QÁn·«şşÚoůňĺËe2™d±XtöěŮű饗tÇwhţüůžeóćÍ“ĹbˇřaB @”ٲe‹śN'…bĐżýŰżéŹüŁßrŁŃ¨ôôô1÷ßŇҢ––źe‹-ňŚšMMMU||<'bĹąĆzk60®úúúB­×ßß/§Ó©ĽĽ<%%%QL`‚źsŇŔ´LYĆKooŻěv»fĚÖöž8qBwß}·|đAN›F :´dÉÂX L¬V«JJJ(7î9^.\¨äää°lóĉúř㏕Ŕ ¦, Běv»ěv»ßňYłfŤK˙CÍ+•““Cń/ĺĺĺĘĘĘR^^ĹQeáÂ…’¤ÖÖÖ gÝaě† G6Ld€ÎÎÎ!ç†*,,Ô}÷Ýrß˝˝˝’¤ŇŇŇ€Ź˙ň—żÔ´i|HpkllTJJ …QéâŋڿżV®\9î˙L˛D@RR’˛łł}îlęf0ĆÔw__źúúú´nÝ:żÇćÍ›G L"999JKKÓ›oľ)IăĘvwwë“O>!ŚŤY" >>^K–,Qyyą •šš:îŰpß%U¸ŮWFFFŘć 0>’’’´sçN•––Žk(›““Łgź}–űODCdžž]ľ|Y‡R{{ű„mçĉzýő× c€IĘĘşGĘÖ×׏©?÷ Ăc#@€1›ÍÚąs§¦M›6aˇ¬{N¨o~ó›‹EeeeL8w(›ťť­Ĺ‹‡ÔG}}˝'ŚEäÄą\.e rl6›JKKuóćM­[·nܦ/`‚~ 8}ô‘śNgČÍf%&&RHV/^Tww÷¨˙>رc‡rss)`1B€ó);ÖŹ ~łE Ś,++‹Q"`ŇÍHWďżc#ʞD »Ý®ŮłgËfłéćÍ›cꫪŞJK—.%Ś‚Đ××'›Ír{FČ€Húĺ/©… yŁ/kDźé”€č`2™$ „;c e.\¨Í›7SP l6›>ţřc­Ył†b€°ëííŐĺË—U[[+I~ˇ¬Őj%ŚŤBŚ 9ťNť>}Z7oŢŐś˛LĐŚŢXćµZ­Ş®®Ö‹/ľH!@DôööŞ´´T­­­ZąrĄO(k0táÂĺççS¨(2a#dOť:R;ÁŔE2ŽőOOOWFFFČý477ëčŃŁr8žeFŁQ?üđú />>^ż˙ýďŐŰŰÔŤľŞ««µrĺJÂX ĺĺĺZľ|9ŁFŔ¤”””¤ť;wŞ´´Toľů¦¤‘˛îÁ , HQfÂŮÂÂÂÚ¨ŞŞjÜöăŕÁZ¶lYĚ…ĽîúďŢ˝[{öěuűŞŞ*íÝ»wČsQRR˘üü|=˙üó!źë‰ĐÜܬ#Gލ¤¤„g7€IďţáTZZŞC‡ Ęşç„Z¶lo¶€äĘÖÔÔhĹŠ ÖbÓ¦ę9©¸¸Řgt'FVQQˇ˘˘"ź0¶  @JOO÷,«­­UQQ‘***˘bżËĘĘtď˝÷ęČ‘#śDS‚ŮlÖÎť;5mÚ4:tHííí~ëxOĐĎ'L€Řĺeň“źĆFą ż©×˛eËTVVôúFŁq\¶[[[;®#mc…ĂáĐöíŰ% LQVV¦ââbżuĘĘĘTVV¦®®.mܸQůůůŽ9Bř`Ęq‡˛î‘˛›6mRBB‚$î– Ś§ţţ~żeíííşvíšßňůóç{ž‡Ń$))‰"LȍƨúH;†WQQá 5+**Ţ1Řh4jĎž=ĘČČĐĆŤ% ŚNŤ–‘˛0Ő¸C٦¦&%%%éćÍ›ŞŻŻ'ŚĆQgg§L&“ϲßýîwjnnö[wÓ¦MşóÎ;% ĚĎ6^ ¦SxsŹ*6 ĂXoĹĹĹ*++Ó™3gtôčQŠČl6Ël6ËétĘfłiĹŠúŇ—ľ¤ÜÜ\ŠŚŃŽ;”””äČ>ţřăęíí ř|tŹ>1™Lş˙ţű)"‚ŐsČ–””¨¨¨Čo>SoîąbÝóĹJRQQ‘çc÷’´}űvĽŃ“ĂáĐ /Ľ ˘˘"ĹĹĹ)..N÷Ţ{ݶoßpD„ôĹ«îţöîݫŋkîÜąZ»v­g_ÝűU[[p;EEE:xđŕ°5p·[»v­/^쳏7n÷iÜŁc»şş‚Zż¸¸X۶mÓ¶mŰ<ËĘĘĘ<Ç>T źăÁçćČ‘#Z»v­ćÎťëSŻ^xÁoZwgÎś‘$ť9sƧöUUUůÔÓ}ކ;űó>—Ď»»ŽŰ·o×˝÷ŢëYgăĆŤCÖ#Đő±xńâ÷ @쉏Ź×’%K”śśL Ś“ÜÜ\™ÍfżĺfłYąąą~_|câš ’\’\!÷QSSăé'##ĂŐŮŮé·NaaˇgťĘĘJźmţĽ/555.ŁŃ8äú’\đŰćîÝ»=ý=˙üóC¶q˙|řđaW~~ţŰČĎĎňřGÚżˇöŃýŘîÝ»GUsďă)..óy{ţůç‡\Żłł3ŕzĹĹĹĂŻŃhtŐÔÔxÖ/((r]÷5áŢŢš5k†í»°°0ŕuěą<|ř°«¦¦Ć•‘‘Ôľ»\.WSSÓë{_#ö “KT˛.—o@¸m۶!ó+++}{ţůç]•••>Awhh0<ë455ą>ěZ¶l™OČćÍČfddxÓ‚‚WAAË`0x‚3ďN’ëá‡v>|ŘUYYé:pŕ€+==}Č಩©ÉÓÎ`0¸8ŕ9†ĘĘJĎ>¸żšššÖ´lgg§Ë`0řá{öěń 
Gâ>¶ˇÂćÁçĎ]łx–=üđĂ®ĘĘJWgg§çÝű–‘‘ás.+++=çlٲe®ĘĘJO[7ďđľ  Ŕsľ++˙˙öî&6Şę˙ăř5 Mf:Zt3j䡣&m S]š–&ŇQ·´ˇ†ˇ,Ä-jšLc\´ÓqÓ΄®Ě”'ŤdF…S„ ˇ˝5”Âüż˙=˙;Ó™ét*üŢŻ¤qz>÷Üsn]|9sî±T H+ĘćzžłŤĄs,ĘĘĘLquűöí¦µµµć@ –m·Ë9Îö=;Ď›éXŕßgÎ ˛vAoş?Ů8g@Ú3ťŐlEßcÇŽeť%isÁ˛oÝşe |Ź'­°ç,Ŕĺ:ßŮąf±:‹ź™÷°}űvsn®|gA33˙~ŠxgÎśI+Ę:‹‘UUU©ýű÷OY u¶-łXlłgšÖÖÖNë\…|gnć¸ć;×YčÍ,ěg;&WćęSçxĺ3»}ŹÇl»téRŢgÄyžł €‡Óśdgú“ÍĄK—&ÍŚ´‹yĹĹĹY ~ů ˛ÓýJ˝3ĂY,sds÷ś}đĘ+Żä<Ć9ŇéóĎ?O˝ýöŰ“fSföK®"áýÎŞĽuëV*d-Ě:‹íű÷mŮúČąß9Ů.>f›Ąęě{v«Sľ‚¬ýĽ,[¶,ď}ŰEřĚ™˝ÎüTĎJ®1s>7S=c™ŮöĚY<Üžë5j‹‹‹µzőęűĘ(++Sgg§6mÚ¤ˇˇ!=˙üóćĺHťťť*++›Q^86źëęęrWUUĄââbŤŽŽęřńăćĄa™ÇL%ß1«WŻV$™´}Ďž=óş¶°ÇăQgg§FFFtüřqócż8K’†††ÔÜܬÎÎN;vLŹ'mĚjkk‰D‰D´˙ţ´|ű÷âââ´1(++Ó‰'Ě‹·>üđCŐÖÖŢWßŚŚŚ—qĺo{˙ďż˙n^ÄćĽ'{Ľ¦óĽN—óűä“OôÇ(¤eLçŔĂaÎ ˛«WŻN{űülŐŐŐ™ź]ŚÝľ}ű”¶lśíů裏ň»`ÁI2×Ě”Y°›í1S9q℆††444¤ älÓäńxTWWgúyddDápXÇŹWWW—$i``@ëׯי3g&Ť™=^iĹL»ťYännnV8Öčč¨Âá°Âá°<ŹŞŞŞTUUĄÚÚÚŕíb¬}]gQ9“łO&C§sí™ţDssłľřâ ŤŚŚhĎž=ÚłgŹĘĘĘTUUež{<˛dtčС¬'466jůňĺć÷ .¨ŁŁ#çîÝ»§ńńqIR0ÔĹ‹gťY^^ž6›tË–-úꫯ˛fVVVšĎ:{ö¬É<ţĽŮ7ÝbńůóçuöěY-_ľ\wďŢ5Ű;;;uřđá´c_xá555™ßďŢ˝›łťŹ?ţ¸ů<>>žvď7nÜĐéÓ§s^ź~úiÝşuK’Ô××§k×®™ţt^ŰyďůÚ9ť1zę©§´yóf­]»V˙üóŹ~řáÝĽyÓ/ß{ď=“ąyóf577kttT›7o6ĹÍ«WŻš{Ú˛e‹y>.\¸ C‡Éď÷ëÔ©Sć~ě"p8VssłŢzë-•——gŁĚgîÂ… joo7ÇŘEíéhoo×áÇÓúł´´4k^˝zŐ|^´hQÖgţ·ß~3ÇŘĎ’$}öŮg×Áő÷ß›vvvvŞłłSŹG555úňË/ĺńxîűďh6ăN&™d’I&™d’I&™d’I&™d’ů¨e~ýő×Z·nťü~˙üdŻ\ą"Izíµ×&í›0űíßł÷í·ßN:~ĹŠzîąçfťůÍ7ߤýţńÇkďŢ˝93mׯ_WII‰É­žžłŻ¨¨H/żü˛***äőzĺóůtďŢ=Ő××›Bˇ}Î{ÍŮź™íŚĹb:}ú´&&&ôÎ;﨨¨(ď˝űî»ZłfŤ>ýôSIŇ“O>©+V¤·aĂőööjxxŘ´á»ďľ“$­\ąRĎ<óŚ9ŢŮΚšSľxń˘âń¸ÉíرcÖÁ«V­’$UTT¨»»űľÚŐŐĄÖÖVI’ßďWż©‚gV·íâbCC$) ĄÍÝąs§z{{%IçÎť›q[‚Á ©´ç;ßî\mĚ—őꫯʲ,ą\.8p ­ýÓąĆt®ť©§§G»ví’$8p@ŐŐŐÓ:Ďnk¶±N$fą;Ó>ľĄĄE@`ÚýŤFŐÓÓcĆľ˛˛RˇPČěßşu«âńř¤v8ź…¶¶6SÄžÍó<›çm¦ĎŤÍ˛,EŁQutt(™LJŇŚű ٵ··kăĆŤŞ©©)ču˶±¦¦F»wďţ×tÎđđ°)dUTTčŕÁf*qGG‡b±ŘŚň|>źůŤFóŰŃŃˇŽŽ%‰‚Ýo,“eY’¤M›6ĺ,ĆÎôľdżŘ,Ë2m­¨¨Čši//ŤFŤFÓî-3«··7gWWWëŕÁć:Ó˝g˙Mu_±XLęíí5í,Äłm˙c“ŰíV}}}ÚK膇‡ůż%ŔC챇ˇ‘ŤŤŤf¶h[[›$ißľ}rą\’¤]»vͨxć\Â9Ă2SOOŹ‚Á ‚Á`ÚWć ©´´4çľ|mź źĎgŠť˝˝˝ćĄ]ůŘă!e/ČJ23RűűűMŐď÷ËívO:vçÎť iË5d˛ÇÝţďtŘĹßţţţśĹu˲´k×.3ćŮÚ÷ ő÷÷+ Ş««kZćB´ sgÎ ˛cccŠÇă3úqWÁ ) 555™ĄŰíÖľ}ű$ýw˝ga0“ýw[ii©ůzy,Ó¶mŰ&tc±Étą\“fsÎ%çLU{i…L­­­ÓžĹ:---¦ĐŮÚÚŞ†††IłE-ËRżLá´˘˘"çL^»ď,Ë2ł=ł-ŕv»M±< e˝żh4jĆ3×’ ÉdrŇLR狹&?-ËRCCYŔyü\r>WmmmYgŔN§č €‡Ăs}D"ˇ­[·Îč{ ÎD"‘¶TAćÚ™ŐŐŐf=Ůžžůý~S¤«¬¬”ËĺŇŘŘşşşÔŐŐ%Ż×«ŁGŹJ’vďŢ­x<®ÁÁAEŁQĹăqů|>ą\.%“ISvą\ęîî.čĚD·Ű­††…B!% mذAŐŐŐr»ÝJ&“ćk˙öRlIźĎ§––µ¶¶jllL±XlĘ™›ĺĺĺ:pŕ@Ţű±ÇÉžéś«ÚŇҢx<®±±1m۶M>źO^ŻW’ŇĆĹëőŞĄĄeR;âń¸’ÉdZa·˛˛RĄĄĄjkk3ł©äóůäóůL®s)…٬3;۱¶Ű•H$ä÷űUYY)Ż×k Ëv‘8ßň™>ř@/ľřbÁŻ›u†ěąsçôăŹ?Îk‡X–Ąm۶IRÚR™ň-]Y°ł [¶H$˘ĆĆFą\.Y–ĄX,¦h4jŠ~öˡś3V Ą©©É“ɤşşşĚWůS©”‰DLŰ~ůĺ—¶ć©˝né¦M›ň. 
`E#‘Č”kg3ßlăŇŇRuww›™ ‰D¬=kŹ‹ßďW(štͦ¦&S¤¶9—š¨ŻŻW(Jřčď8IDATËîéé1köş\.566š™×…R__ݶ¶6Ó×±XĚ´+™LÎ[»eË–-SIIIÁŻ» •JĄ279rD}}}Ú±cǬďç…Sv‘Ń.ŔąÝîĽEQç,BŻ×›¶îŞeYiłGó˝ kppP–eÉëőšŮ“S]/ߌE»2Ű4“¬ááa3ëÓív«ĽĽ<í8çýů|>S¤śÎµg2–ccciłSóőO6‰DBuuu’¤p8<­sť÷.ý·@îĽÇ|ײ‹Óąî¦ŮSőg®qésă|łŤ7Ś… jéŇĄżîśd§˝{÷* ©ĽĽ\‘H„ŔĽšŻ‚ěct=ćšóe^™ëóáňĺËşyófÁŻű]Źą‹Ĺ´`ÁłţíT/ó ©»»[7nTMMMAŻKAs"Ź«ŁŁ#mŰľ}ű¦\˙x”e-Č®\ąRoľů&˝Ys^˝^Ż™ €˙yY ˛«V­Ň˘E‹tçÎzłdYłb€˙ĂK˝0§(Ć˙Ź‚,€˙9K–,QIIIÁŻ» •JĄ˛í¸|ů2Kx$-\¸PK—.-řułÎ=räöîÝ˨ŔÄ’P d @žČ·sxxXĄĄĄiŰ,Ë’eY“Žu»Ýr»Ý“ÎφL2É$“L2É$“L2É$“L2É$“L2É$s>3Oť:Ąőë×ëő×_ź˙‚lQQ‘$éűďżWkkkÚľ3gÎččŃŁ“ÎٰaŞ««Ó¶µ··g˝(™d’I&™d’I&™d’I&™d’I&™d’Ić|gŽŹŹ«Đ¤R©T¶W®\ŃíŰ·µjŐŞ´í7oŢÔŤ7&żxńb•””¤m;wî\Ö‹’I&™d’I&™d’I&™d’I&™d’I&™dÎgfQQ‘–,Yňď)Č,^ęBA „‚,Y(˙µ}?;á@ďIEND®B`‚ceilometer-10.0.0/doc/source/contributor/6-storagemodel.png0000666000175100017510000014720113236733243023736 0ustar zuulzuul00000000000000‰PNG  IHDR$Dβői‡zTXtRaw profile type exifxÚUŽŰ €0E˙™ÂxĘ8ĆhâŽ/XMăů€››ćPŘŻó€Ą dĐćÝ  ^3t"1Ríśw eâYđÝQçC}űŹ&Öípu·f›mśvŢ…¤ŐLQY±ľSÂú(óěżWĹźnAą,"jD—@ iTXtXML:com.adobe.xmp ·"˘äsBIT|d IDATxÚěÝw|Tuľ˙ń7)3“IĎ„„tJˇ PQ îZײ°şö˝«ëş÷®îŞWeu®¸ĺ*kY]•âŞ4Ĺ‚„PÔ$ %…^'™ôß“ ŇH†„ĽžŹÇ>–Ěśsć|?ç€9ďů–~őőőőp!7J\Ť@¸p9 ŕrŔĺ$€ËyP wűú믵jŐ*Ůl6Š}”ÉdŇ‚ 4~üxŠč5č!ôr‰‰‰„ĐÇŮl6mÚ´‰BzzHçĎĘJĺçSčc ,UŤĐëHçĎĘJ…eť ĐÇ”úřHz%†l—#.G \Ž@¸p9 ŕrŔĺ$€ËH—#.G \Ž@¸p9 ŕrŔĺ$€ËH—#.G \Ž@¸p9 ŕrŔĺ$€ËH—#.G \Ž@¸p9 ŕrŔĺ<ÎŐ†ĺĺĺ)??_éééŽ˙o*==]6›M’4tčP§÷,‹‚)‹Ĺ˘ččhîşĐ9HX­VĄ¦¦*55U)))ĘĚĚěĐţlőgI R\\ś† ¦¸¸8s÷ĐI˝6ČËËÓćÍ›Ű@Ś&…„9~6šĽ< ÂńsiQˇJŠň?—ެ¤ČéÚľ}»¶oß.éd@1nÜ8Ť?ž; €čUD^^ž’’’”xÚb@Ô E ڕݠ|ý9hh§?/7+C•¶ OűQy'2••vXU•öaMŠ   Ť7N“&MbxíĐ+‰ääd­_żľĹˇŤDxô3 ZŇ?,R’śŽ››•ˇĽěL?vXGS¨ŞŇ¦‚‚mٲE[¶lQPPćĚ™ŁÉ“'swp=:HLLTbbbł ¦ac'hŘč d2{»ôśú‡EŞX¤†Ź»P’t8yż’÷~Łcż—dď9±|ůr­[·N“'OÖ´iÓäííÍť@=2ŘłgŹV­ZĄ‚‚ÇkŁIŁ'NUü ň ´ôs?ZăGËVnŐ‘ÔÚőĺ§*+)RAAÖŻ_ŻM›6iĆŚ4ŃŁ‰´´4­ZµĘ©G„Ź_€Ć\xÉYé Ń&ł·†Ź»PĂÇ]¨Ś#µóËOt"ýl6›ÖŻ_ŻÄÄD†rĐ GV«U«V­r¬`!Ů .ąŇ14˘7‰4T‘†:ŤC95wî\&żôig=HNNÖ’%KdłŮWŻhš1fÂÔÝ#˘=‰ĂÉűőő§¨¬¤HÔ˘E‹4{ölÍ™3‡;Đ'ťŐ@bĺʕڲe‹ăçˇ#tń•?éQsDt…ÁńŁ=XűvnŐţo¶ŞŞŇ>Ś#%%E ,Ppp0w" O9+DZZš–-[¦ĚĚLIö^—Ď™ŻÁńŁĎŮB›ĚŢšxéLĹŹ™ ŹW/U~N–<¨§žzJ ,Đřńăą}†›«?pĎž=zţůçaÄ€¨AşůľGÎé0˘)ż@‹ćŢů[ť?ő I’ÍfÓ?˙ůO­\ą’»Đg¸´‡Ä×_­7ŢxĂńóä+4öÂKűdá'^:SáŃCôÉ»ËTUiÓ–-[T^^®ąsç˛<(ŕśç˛K—.u„ŁIsnľ»Ď†Ť" ŐÍ÷="KH$iűöízţůçeµZą3ç4—K—.u,ééă „[îQä ˇT_öą%nąG1CGH’233 %çĽn$š†–0ÝxÇoÔ?,’Ę7a2{ëšy·+nô’%çľn $N #nąG&3ó#śÎôko"”ô ÝHFtˇ /č–@bÓ¦M„gŕÔPbÉ’%pNéň@"99Y«WŻ–Dq&š†ÔŠ+( ŕśŃĄ„Őju|›o0štůśů„g`úµ79–ýěłĎ´gĎŠ8'ti ńüóĎËfłI’.ź3źŐ4ş@Â-÷Č`4I’–-[¦´´4Ščőş,X±b…233%IŁ.¸XăGSÝ.`2{ëŞH’l6›–-[Ć$—€^ŻK‰ääd}öŮg’¤Q4ućO©lŠ4T“ŻHdźäróćÍĐ«uI ±|ůrIöy#®ľq!Uíc/ĽT˘I’6lŘŔĐ @ŻvĆÄÚµkUPP Işŕ’+™Ä˛MO¸Éi> z«3 $ňňň´aĂIöˇc/Ľ”Šv#ż@‹FOś*É>tcÓ¦MĐ+ťQ Ńô[úé 7QMxéLÇR ëÖ­c‚K@ŻÔé@"99Y”$ť?ő ůZ¨¦‹\>gľ$űŞLpŮýöěŮŁ»îşK+W®¤ĐE:H¬_ż^’}"Ë1¦RI꩸ŃH’6oŢL/‰nÖ84fË–-Zşt)€.Đ©@˘iďѧ2‘ĺY0á’+%ŃKÂŐ¶oßN(] S˝#Î>ż@ ˝$ÎB 8s$čŃsĐKµű‡+°¸$B 8S$¶mŰ&‰Ţ=_ E1CG8]tÁK×ĚýˇtV«UIII’¤qŁčŃÄŹť(I*((Đž={(H73yyJ@čP ‘””$›Í&I3‘Ţ=ÁŕřŃ2MŽëîG(g®CDă°żő‹¤z=Dăä–Ű·ogrK!”€3Óî@"//Ď1™ĺ /ˇr=HüŘ Ž?3—„ëJ@çµ;h:`PÜ(*×ô‹”%$L’”’’BA\P:§ÝDă®%$L~*×Ă„E‘$íŰ·Źb¸ˇtśG{7LMMuzđEĎ2hŘ(Řő•$)99Yńńń>˝+ZVQQŃć6ŤˇÄ‡«ţˇÂÜăÚľ}»$iáÂ…ZĐ®@"99ٱşFÄŔXŞÖEęřsRRR‡‰_˙ú׎kŚÎ!”€ök׍ĆŢ’=ŞőP˘5»^íŃ4pÂéM^mnĂđ hźvőhěĘ? 
jLfoŞÖCE ŚŐ‰ô#ĘĚĚěô1.Ľě§ ěA1[` i_]č)mkW ‘‘‘!I ĺAµ' Ź˘oµQRçç‘ěˇ8Šy†% uí˛ŃŘťß/0Šő`~'ŻO{&bD÷břś^›Drr˛ăĎ–p*Ö5]Ž5==ť‚ô„в6‰üüü“Ľôčé,!a’$zB h®cD“oŕ]i|´ŻĆGűjÉâEmnűÎëKŰ÷E>ţÍ®Î>B pÖf ŃřM{ă’’gÓ{o/UuuőißŻŻŻ×Ę7^=ŁĎxĺŻĎöę0#x€}âŃ3YiÝPNj3(//ď'ęîślmţhÍi·Ůöĺf;|Pîîîťţś/7Ě]nC(v˝ĺD== Š1L+—˝¬™ 7´¸ÍĘeŻ(nř(ĄůQµµ'W™(-.Ň’ĹŇçź®WnÎ ëŇWëއS`}ĘžťŰô‹ëŻtě3>ÚW^fo%&źh×ţĺÖ2M¦čA±ZńŃWZôČúj˧ި°jÇÁ<î44 %X´÷ęçć¦Ó¦)zęTYâ‡Éč ~ý¤Š‚ĺ%'ëȧ•±m[—}Ţ­ź&Izó˛ËŰő:z‡–®×ô%n˝ĺDkjŞ5ăšź(i×vĄ|·ŻŮű™iGőŐgźčŠŮ×Éf;FŘlşcîŐzďíĄzđŃEÚ˛ç~üĎZ˙źwtÇŤ3UQaď2~Â$íI+uě·'­T‰É'Ú˝ż§§ˇ!(Ő_žüĺś8®{űýćriťŚ&/îę^JĐS˘÷ń‹ŽÖś×˙Ą‹˙đ¨˘/˝Dޡˇň0ĺn4Ę',L/ż\—?ó'Íxţ/2řůQ°żî§ŽrP@ ˇŠ űĂ˝_€ĺ¬žhMMŤ®ţÉ\őë×O+ßxĄŮű«Ţ|M’4ëşůNŻŻ\ö˛R8 [~yż®ýSůúůëŠŮ?ŐĎďy@‡&kusN´wOOIRqQˇ óó´ä­µš{Ű/5ďçwş´NM—fmşd+%ĐaDd¤fľřň8PÖěíüűßőÁÍ·čí+ŻŇ;3ŻÖÇ÷˙JG·lQ}]ťÂÎ?_3ţßźĺćqćŃŢĽěň>ńŤyP\7ő'5NŽčxÖO6,"J&_˘Ź>X­’˘BÇë6[…Ö¬|S&_ް(§}6´V’4ăšź8˝~éŚk$I_lú¨ŐĎlďţýúő“$UWUéć;î?Ły,Đ·C‰ŻżţšÂôP˙áQ}}ub÷n­[¸PÉッŇĚLŐVU©ĆfSîÚúäSújŃ"Ő×ÖĘŻáso¤píd‰F¨?čCú`µŠ‹ tíÜ[šmäÇTIRdŚó*!1c%IY™é­~^göŹ6ś; íVVR ˛âÇĎfł™˘ô@^(K|Ľl……úâ‰˙Uu+ţÝĽEÁńń 4XĄ-¬xÓÄ ź;W!ŁGËčď§ŞŇRĺ~÷˝~x÷]e'%5ŰľŁó „ťľâŻżNÁ#FČŕăŁj«Uů))J]»Vé_5Ľ<˝˝5Ăz§Ąiím?×ŕ+®ĐČ›n’Ox¸¬ŮŮú~Ĺ úČľĆΚĄ7Ţ źđpUčІ Ú˙ćżĎ¨Ť#v“λóÎfíÝúäS:şeK§jfđńŃĽőëT|ô¨ÖÝ~‡.¸ď> Ľü2yLzçękÚ¬á€ńăĂőęßPCkNŽ~üä}żbĄj«ŞÎ¸ćŐŃöw¤ í©GŰw¦őö_>ccučĐ!•6é‘p6MżćZ=űLJ´úÍé–_ţJýúőÓŞ7^‘Źźż.ź9§ŮöÖ2IŇÔ‘-/?7»ŐĎëĚţţAÜYh—Ľět}¸ňŞ®˛I’n»í6Ť?žÂô@Ń—^"IJ]»VU%%mnżëĹ—Z|}ČĚ™šôđoŐŻI/*S` ˘.ž˘¨)“őÍß˙ˇ”÷ßďôyŽś?_çÝ}—ÓkF…Oś¨đ‰uŕ­·´çŐלޯ­¬´˙ÁhŇŔË/×”G9ůďYL´&ý÷ďTž—§ŔÁuŢ=w;Ţó0@ănż]•%ĄJ]ł¦ŰÚŘ™ăŐ4´ÉÝhÔČyóÝOŰýyĂo¸AÜwŻÔĐóM’|#"4îżPÔ”)úäW˙ĺü@߉šwDgÚßŃ6tő=u&őÍ”ĺ÷öň2ëŠY?Ń+ßÔןo”ŻŻź’ěŐ 7˙B¦&t4űřŞ´¸H‰É'äeöîđçťéţ®–źsÜńçřřxîđ^FL™2…ÂôPýGŽ”$eîŘŃécřF„ëÂß<(I:đÖ[:ôáG*ĎÍ•—ŢӦiěÂşŕľű”µk—JŇÓ;|üŔ!C4ţ—wHőő:đÎ ýřńG˛fçČl±(ćňË5îö_hÔĎ~¦ŚŻ•űý÷Žýęjjě˙A0{iěíżĐöçëČĆOeô÷×ÄT䤋4ú¶[奯ž~Zi[ż’)ŕä{ŻşŇHt¦Ťß˝ýŽľ{űť{‚t¶fŽ6yy)îÚkµíążččćÍNč-ń8Pçßs·ęjkµó˙§Łź}¦şŞJĹ Ó¤ß>$˰a}ë­Jú׿ΨćÝyĎt´ ­Ő˙Śď©Öô-n˝ń¤çÜhšńéş÷´ţ˝’¤„†kHŇŕˇö‡ňśÇ;őYgşż«U6Ya„č:^űÄľ%iiť>Fܵ?‘»Á ¤×—jĎ«Ż9ćź(ËĘŇ·ŢŇľĺËĺćᮡłgwîř sÔĎÝ]‡>üP{^yE%i骭¬Téńă:đÖ[Jů`ŤÔŻź†\su‹ű}}uěłĎupÝ:ŐŘ*eÍÎŃ·K–H’BFŹVęktdÓfŐV:żçÓmměôńęë%I¦€Ąő•mŘ ›M5­˙9ěÚősw×·ßVęš5Ş*)QŤ­R9űöéË'źRŤÍ¦ŃŁ»¬ćÝŃţ޶ˇ[î©NÖHôxçMś¬¨ÁÚ™ř…ľţěS :LŁÇOhqŰéW'H:99eŁďöíÖĚ ăőěrzÝŁaFüÚÚÚNíFś›<z`UWŘ:}ڰóěĂqúi‹ďŮ´Y’:nl§Žßżá!óŕ† -ăFűv ˝=ZrxŁóą•eeťÜóćß3x›»­Ť]qĽŁ§śwkBÇŽ“$űüófď:¤wf^­Ox KkŢŐíďhşűžęHý@ű»­ 'Ř«Ş´ő¨ź}ĂĎ´äů§%I<ňÔi·›{ë/őáű+őęßţ¬ÁCă5é’é:”ú˝ţřŕťĘ9qĽY5p°ŽJUŇÎm>z\‡÷#ÎMŐÖ2ýýeđńVeqI§Žá=`€$éúŐ«ZÝÎ'<ĽSÇ÷ •$kąGqCďďÓŁ<'Çéçşęę“ďĺžć˝&Ý𻺍]qĽâŚŚö×0ĚţyeÇŹ»¬ć]Ýţ޶ˇ»Űבú€ľĄÍQQöe4 r˛zԉϹágrss“»»»f]wÓi·3šLzu凚żđ.-~ę]<2\wÍź-KpżöŽf]7ßiű‡źř ‹ŚÖÝ?›Łź\:ľĂűźm™GI˛OF Âtk¶ýaľ~úőďźŇŻ˙T›ź5é’éú0ń»NíßŇyşZcO–Ž$Ś@×Ęýţ;Ĺ Uô%Sub÷îNŁş˘B­ś=GUee]~ŽŐ6|ĽĺáĺĄj«µůĂ­ŮËqÝĄ«ŰŘÝ5kéŰÓŰ[F??ŮŠŠÎzÍ;Óţ޶ˇ§ßSŕÜŐf §@˘¨€ŠőpŤ=Y{¶€0]Łqţ„!3gĘ7˘íá!ŁFiöż^Óŕ+ŻtĽVš™)Iň‹îžżźÖě’¤€[|ß?Ćţşőĉn«SW·±»kvŞ˛†ÚřEG÷šw¦ýmCOż§@$š.y<íG*Öĺfť§K A.ţűµ˙€NěŢ-“I—=ő´Ľ,A§Ý6hčPM}âq"˰aŽ×O|kďY1bîĽ÷ ź8Q׾ů†ĆÝ~{§Î1{ď>IRě¬kZ|?¶a%„ě}ű»­N]ŃĆ~îî.«Ůéj8䪫š˝8d~öÉÇšůâ˙ą¬ćťiGŰĐZý{Â=úp !I’¤JşdöhM—ülÚł„č‰ţł*ňó0xćĽţşFŢ4_~ŃŃr7eôóSđđášđ«űuŐ?ţ.sp°ňSSµçŐWű§®]«›M1—]Ş‹˙đ¨ü"#ĺćá!/Kâ®MĐ%Ź?&ż¨(yz{węüR׬Q]M­bŻąFăy‡|#Âĺn4Ę/2Rănż]±×\ŁşšZĄ®]Űm5:“6ÖŘěWb.»Tn2v{ÍNupý:Ő×Ő)öš«5ęć›eô÷—‡É¨Đ±c5ő±?ĘÝhTîď\VóδżŁmhµţ=ŕžç.Źöld±XTXXH‰®éő‰îdWÝCß}ŁěL®sK†Ž(߀ö=„ç&kvŽ>ľ˙Wşä‰Çe6LçÝu—λë®·M˙ę+}őô"§‰ ËNśPâłÖĹxTfĚĐ 3ší—źšŞ˝Ż˙«SçW|ěľ]ň’&üęWuóÍuóÍÎÔ×ëŰ—^TńŃŁÝVŁ3ic~rŠBÇŤŐÔ?ţQúŁýµ7/»Ľ[kvŞ˘ĂG´ç•WuŢÝwiü/ďĐř_ŢѬĆŢzËe5ďL=;Ú†¶ę¶ď)ĐljńăÇëСC*ČÉ’­Ü*“Ů›Ęő@]±Âơ£§‘źť®+~z'aDW–•Ąďş[1—]ŞË.“%>^¦€@ősë§ňÜ<ĺěß§”÷?P~JJ‹űűüs;¦óćjŔřńň R]MŤŠÓŇtlËgúá˝÷ś–Úě¨ä˙Ľ§˘ĂG4|îŤę?b„<˝}TUZ˘ÜďľÓ÷«V+gßľnŻQg۸}ńóšôŰß*(.Nu5µ*iXR˛»kvŞďV¬PááĂ~ă ˛ &OłYÖěűâ xë­f“;vwÍ;Óţ޶ˇµú÷„{ ś›úŐ×××·µQZZš-Z$Işę†?šĘő0¶r«–.~L’4kÖ,%$$th˙'ź|R™ “§ˇeˇC4ű¦_÷¸0âąçžÓˇC‡d.)QÜÁC\(čcR‡ĆŞÜĎO±±±zřá‡) ×hW‰ččh™L&Ůl6e=D ŃO;ěřs\\\‡÷ě±Ç(bümˇg´ź[{7l|Č=zđ;ŞÖ5×0™LN+ŁŔ5# 
cÚHŚ7N’TV\¨’Â|*×Ă4Eťé3C×î@bذaŽ?'ďŰIĺzܬ •J:Á5# sÚHkĚ1’¤”}»¨\˛ď›­’ěĂ5$\‡0:Ď­#7¶‘qä Őë!ޤp\oo–duÂ83 $¦L™"“É$‰^=ĹI;T]i(ž4iqÂ8snݡ±—Dęţ]˛•[©ŕYÖ ˛ş† F@×čp 1}útÇź÷íÜJϢŚ#••vX’4yňd ŇͬĄ…„ĐE:HDGG+66V’}2EzIś=»¶~*É>™eÓ ÝŁ¬¤€0ş[gvš3gŽ$©şŇF/‰ł¤iďéÓ§3™Ą FŔ™ëT O/‰łŚŢ®c6›&Ś€®áÖŮ›ö’Řůĺ§TŇ…čáZ“'OVll,at!ŹÎîŻ1cĆhßľ}:°ë+ĹŹť ţa‘T´›ŮĘ­úlýJIôŽp•ńăÇküřńşŰ™ě‘epp0EĐkY­VeddH’RSS%Iéééš={¶˘ŁŁ[Ü';+KëׯoóŘÜÜ”T\Üâ{eŮŮí>Ç]Ż˝&ٱĹ÷N;&I˛Ůl:xđ`ł÷“[Żż^Ő11ňôňj¶MZZšV­Z%IŠŠŠ’Ůl–Ĺb‘Ĺb‘——×ië@ ŃDtt´fÍšĄ 6¨ 'K›×ĽŁé×ŢÄß°.r8yżěúJ’ŃjŇ=ѦM›”””¤ňňreffžv»‹Ş’“Á­¸X•%%ö ˘Âi[ł»»ĽÝÝť^ 4ä_Y©âSzN45-$¤]çlÍÉ9í{ćŠ Ťňó“$eŰlÍŢĎ­Ş’$enÝŞÄ]»śŢóoč=‘S]í3Z 5${ůI“&iţüů§“ ””:tH©űw)_żJÁˇ,zlĺVmY·BŐ•ö´íž{îaU g=|HMMUzzşŇÓÓťľŮ˙ó#ʍ,;[¶˘"•eg«,;[µ =Ě%%ęo0Čŕî®@OOÜÝŕé)››‚N@śë‚ŚF]ćôZYuµĘjjT]W§ÂŞ*UŐŐÉ»¸X?nÚäŘĆÝ`Oh¨L2ůűkőęŐNÇPTT”ă=5¤č±é8Đ IDATŇ@ÂŰŰ[÷Ţ{Ż-Z$IZóď%şö–{%:iÍż—8捸ńĆ™ŔY±nÝ:Ą§§+%%E¶†'4úzŮ2ůxz¶ř^Ľźźâ†8ŕô|<=5<]ŚÚŞ*§§«8=]eŐŐÍŢĎĚĚTff¦¶oßîx-""BŹ=öXŹj«[W0::Z·Ýv›$©şŇ¦5˙^˘’Â|îŞÚĽćGqŃEiĆŚŔYńíÎťÚ»woł0˘żÁ 8]¤i!!§ #Đ}|<=őł%„‡kjp°Fůů)ÂËKćSćÖčWUĄ˘†‰8{ Źî8č”)S$IoĽń†cĺŤkoąG&3Ă Úcóšw”şß>éIll,K|čViiiNó?”ž8ˇâ´4;¦˘cÇäkµŞÂÓSBŚF }vEOŐŘł˘iŻŠ˛ęjVU©°ŞJ޵µÚűÖ[’ě“jú„†* &FÁ Ă9V®\©¨¨(Ť;ÖeSxt×§L™˘üü|ÇĘŤĂ7%ÚFDDDčŢ{ďĄ(şÜž={”””¤¤¤$Ůl6-úÍo”—šŞ˘cÇ«Z4şŔbˇ`˝PK!…$ÇpŹĚ]»än0Ȧ-_~éxěر7nśâââÜmççŃťŤOHHP~~ľ¶oßî%¦Í™Ďś-°•[őőƵNaÄC=Ä$–şL^^ž6oެÄÄÄfĂ/>~ăŤNŻŢ«¶ŞJ©©2»»«Ľ¶V’´wď^íÝ»W’} I“&uËś†ÝݸĆáMC &ştf+·:M`I +%''kýúőN+b4ŠđňR¤—aDd4ę'‘‘*¨¬Ôá˛2eTT8‰íŰ·kűöí ŇěŮłS4tW4náÂ…˛X,Ú°aŞ+mz÷_/č˛Ůs5|Ü…}ţÂçfečăw—©¬¸P’}Î{ď˝—0@—X±b…>űě3§×ú ěăŁHłYĆS&?Dßd4*ČhÔ’ *+•RZŞŚňrU×׫  Ŕ±„kWńpUĂd±XôĆoH’>_żJůŮYşřŞźôŮ‹}8yż¶¬[ˇęJ{W©‹.ş ,t©±úL’gż~Š4›5ÚßźŐ0Ц ŁQ“ŚFUÖÖ*ŁĽ\‡ËĘT˝c‡’ËĘ3uŞĽÎř3<\Ů )S¦Čb±hÉ’%˛ŮlÚżs«2ŹęsóJś:_„$ÝxăŤ,í  Ë䦤čÇŤUYR˘©ÁÁ 1™č 3ş»kŻŻ†řúJuuĘŢż_Ůű÷+ćâ‹1a‚<˝Ľ:}l7W7&>>^úÓź+IŽy%öîř˘O\ĚŚ#µú_/8“ɤG}”0@—¨(*RŇ›oęű˙üDZZF”·7aşÔ±ŻľŇŽ_TnJŠ$ű„©ĺq6NÜŰŰ[?ü°Ö®]ëW"qăZI9 isćË/đÜ[RĆVnŐ®­µçVÇkĚ +äĺĺ)88Xą))JY·®ËÇú-©­Ş˛_č?;vt¸çżÇŮ<ů„„ĹĹĹiŐŞUĘĚĚTVÚa˝őâź4zÂT]0ő ™ĚçĆúŢ_hç—ź:ćŠ0™Lš3g˝"ś±´´4=˙üóŁ‘§,ĺ ¸ÂG»ě#VŻ^-Ií~Öő8Ű'ŻÇ{ĚŃ[B’öďÜŞä};5öÂK4ú‚‹{m0‘Ľ÷íÝńĄc9OI3fŚ,X@Ż]bٲe˛ŮlÚ›’"KHśÁ~ 3f„†ęËÜ\UWkőęŐŠŚŚT|||›űyô”$$$hňäÉZąrĄöíۧęJ›v}ů©öîř˛×É{żŃÎ/?u,ĺ)Iš7ožĆŹĎÝ  K¬]»V™™™’¤ó #pVřxzę’ţýőQV–Şëëµ|ůr=óĚ3mîçŃ“¬űî»OÉÉÉZ·nť:äL ŠĄac.Pä ˇ=î”ć+e˙.%ďÝŮ,3gަL™Â] ôp•Ş  ôB^ĺĺň¨«Łúśm۶I’ú Š÷óŁ 8k|<=u^` v¨  @{öěió yŹžŘřřxĹÇÇ7 &R÷ďRęţ]ňńÔ‰S5(nÔYťÓVnŐŃß)yďNeĄvzŹ čůĘ˝ĽT "?ŮFôjž••yŕ;  OIKKSAAýŠ0=Ŕ__í.,Tu}˝RRRzg Ѩ1ČËËÓşuë”””$›Í¦˛âB%n\«ÄŤkĺă¨AqŁ3DáŃ»}XGnV†ŽüN‡“÷;Í ŃhĚ1š>}z»ĆËp˝77ĺ…†*ߤjŁ‘‚çţ>č‹ĘËËötsŁ č<=•[UĄôôô6·őč ÖÂ… eµZ•””¤¤¤$íŰ·O’TV\¨ý;·:–Ó Sph„‚„+84BŁIýĂ";ü™%…ů*-.TiqňNW^vfł^Ť"""§3f(88;čˇrú÷׉đ0Őy8˙Óg U`ÔpI’_č@ ô"y‡“”űc… ‡(Ż­m÷¶˝©aŢŢŢš2eЦL™Ňb8!I9Y*ČÉRę~ç}=Ť&‡†·ůĄĹ…Ns@śNDD„&Ož¬qăĆB=ýE//ĄĹD; Ëđ ‰Q˙Řńň0H&ß@ŠôR%ŮG)€>«ésHŽÍĆ„–8ëĘŞ«e=W‰¦š†’}üTjjŞRRR”žž®ÂBçPˇşŇvÚm1™LŠŚŚÔ°a稨(–íz‰ü  eFE:zEĚţŠwąB‡žGq@Ż–——çřóa«Uců’g×î&ĎásćĚis{ŹsĄáŃŃŃŠŽŽÖŚ3Ż%''K’RSSU^^Ţ®1,ÁÁÁ˛X,Ž˙>˝W~PŇ tü1ć2EŹźFaŔ9ç’©ßL]íÇŇReTTH’Ć…h׼ŠçrA Ŕ“@ßÓ4Śp÷4jČ”źĘ3‚€sŇ‘/ľÔ€k®VUI Ĺ€Ë%kwQ‘$ÉT^®‘ýű·k?¦bpÎ)ň÷w #F\µ0śÓŞË­úţ˝÷e`ůO¸Pem­ľĚÉq #bSReňôl×ţέ Ą Ś‘t2Śđ±„SpΫ*)QҲĺň`rK¸Č—99ŽaŤa„G]]»÷'pNI‹‰qL`3ájÂĐçě{ëm•dťP­Á 5Úßđí5Đ•Ü M “$ćĺw8ŚÎń9$ô-Eţţ˛úůJ˛O`ÉJ ŻĘضMYGŹČ®ýĹĹ:\V¦Á>>Šóő•ŃÝťáŚtxó;¦áŚUUť:€sFfT¤$űPŤ°áQЧy—–Ęł˛RŐF٬µµÚ_\¬ä’Ezyiźź‚ŚFŠ„VUÖÖ*ÇfSTĂĘ“MF§†ůAAÚť•ĄęŻżÖ”)SZ=>€sB~PŞţŁ9îryšĚôi~eVŤ<đťň‚”"›Ů¬ęúz)/בňrxxh°ŹŹůřĐkN—•)ŁĽ\ňě×OaĘŢľĂ)8íďĺ– ĄźČRib"€ľˇ8 @’d0ű+|Äd ÎyÁÁÁš5k–v˝úŞŚUŐ§ÝÎRP KAJ|ĽUh Va°E’TTSŁÝEEäăC1ű¸ĘÚZeVT(ŁĽ\Ů6›ŞëëďU××kŰĆM˛tůçHčý˙€ * ´Ńńô ÁÁÁJHHPć“Oµk{ż2«üʬ•Ąâ€X‚d¨¬RIU•"Î;OőµµŞ./§°}HşŐŞýEE*Ş©iöž[M­‚ňó”_ sĂJ]Ť@@ŻWÖ$ŐŠNAZa¬ŞRHNŽBrrTăć¦Üş:ĺî? 
IňʉQčŃňéß_Uĺĺz˙đayöë§P//…Ť 1™Ţq±yz:…n5µň/*’Q‘Š‹»ýó $ôzeľö•5Ü=Ť LAÚű@xĘ2ŤĹÇŽ9ć ¨4d=J’TTZŞ”ŇRI’·»» …Ť 4ęĺE!{‚ĘJUW«°ŞJĽ˝üü¤úz•df*㛝*łŮd2X>ĄĄÝÚâ´÷— @oWiđ”$™P €.â^SŁĐăÇUęăŁr??ÇëÖÚZY+*”Ńđđŕé©kÂĂ)ŘŮř=¸¶VEUU*¬Ş’µ¶V…UUĘ©¬tÚĆ×ËKY'”óÝwŞ*)qzĎ()ţ‡äłvţz˝Ć˙@š|)@W=,ÖŐ),ë„Â~.ńń–Ő×W^fU˝+śůÔÖŞ¦şZć  ąą{ČVTčtśo dps“gż~ 4äíá!OO |†öęŔ)CKŽ»[599=óă28WĽ(@7iśłQŤ››ĘÍöá©;w9˙^ćç§‘#ĺáęq*owwy{xČŰÝ]>žžĐ7—Ënčib­­•µa>‡l›M’t^` c¸…›Á Ů[őŞWmeĄĽóňśŽăVS+SąUćŠ y•WČ«˘ÂĺC0:Š@Đń‡Éş:§€˘©Ş’elۦ77™†ĹÉf67ŰĆZ[+km­$ÉÓÍMçGG7RĐh_aˇr*+h0Čŕććx=¤áa˝Q€ÁpV&Ýl:ŃTksklĚĘRî)Ű·¤07OîîĘض­ůg °‡ĺÍćé÷Ť ÷INNÖ /Ľ ťž§¤ś68«śuuŽ9 * UŢ m=Ľ†´«ŞęÓ»Y t~x¸&N™Ňć¶€-2#łĹ×Ë˝ĽTă~r‡ąĽ˘Ĺá ’˝‡†_d¤Óö-)>@źŐW˙ ¬swçâzŚŚČťHMŐá+4ţüV·%Î6ooý8,ŽBčłř7€łŻÜËKĺÖ2ů¤§·ą-C6€^Îl6S€‚)Đ˙ŢĎš5KˇÇŹËXUMAĐëĐCčĺ,X ôv¤Źç˛^x€|đÁ>Ýţřřxn Ź VBB‚2ź|Šb W"z9ooo~ Čču˛\Ž@¸ŔĄŠň˛µpĘÝ5męjkĎé¶–ćiá”úőśŃgĺó sOhᔺ˙ęáÜx Ça hÇô§«^Ńß~Ąü*/-–·_€b†ŤŐ´ëhÜ”+šíSVR¨Ô¤í:ď’«)ŕ)ަě“$EĹŽ›»{—·'ÔüÔsČ<ś,IŠ:꬜ϱ†Z6†Ŕ9---Íţďmt4ĹzzH@+2~üAĽíríܲVs<¨˙·úýăŁdÝůŘ‹:‘vHűÝ­Ú¸ęU§}ęjkőě}?UĘžm°Ĺ‡ä˝’¤a]×k 'ÔĽĄs~ţĹZúő =´řťłrNŤáĎŔx ç®´´4-Z´H‹-ŇĘ•+eµZűLŰ­V«RRRTęăŁ7í@ ç”·ţú¨ĘŠ ô‹ßż ó¦Î”Éě-/oŤśx©î|üEöÓńcťöŮňţ2eNÖ áă(`kÉĂĆvŮ1{BÍ{âu?Úţ ŚËŤŕśU^^~ňßâ-[ôČ#Ź(11±O´===]‹/ÖŹĂâTnöâf@Ź™‘©™Cb5oŢĽ6·eČ´âÇ»%IGś×ě˝ŘQhń{??±đ KÝďřůĺ˙˝W«^zJ‹?ŘŁĽ¬tmř÷?t`Çg*Ě=!Ł—YCF^ 9 ĐĐŃűÔ××ëž+†ČËŰWϮئ·˙öG}űůYDę—mŇľm›µqŐ«:üĂUUÚŁ‹Żž§+ćÝ)O§óŰ·m“Ö,]¬ôßËËÇW]qťnĽ÷úÓÝstä‡$=»r›B#Ů™++ч˙ţ?íú|˝ňł3e4™5tĚÍľí y^›uęČyK±×((4B˙|âíßľEuµµš<óÍ»˙qŚ'ˇúrÝŰúęĂĘ:vHU¶ ‡EiĘŐsuőÍ÷©_ż~§­ůóďďî˛:?šŞµKë‡Ý_«˘¬T–‘şřšyšyÓ=r÷đhőş?xíXĺeëĎ«v($"F’Ú}/HŇ˝W•úőÓ3oĄ7ź˙˝ľßőĄ &/MżţšóóÚĽ.ÇZÚÓöŚĂÉúă­—)4j°ž]áüK˝µ¤Hż˝ţőssÓ_ŢÝ)łŻ»ďź¶îoč 6›MË—/Wbb˘,X ŕŕ`Џą˘B~~íBE h…—ŹŻ¤“ß4·ć‰Ąµř$I’Ź–~}B‹?ŘŁĂßďŃc?ź¦|«»źř§^ü$E˙ýŹ˙¨ 'Sľ˙:Ü÷ŤăůŮެ(WHÄ ˝úÔŻ4jâĄzaM’~˙Ňműä?úëĂ·¨ŇV®Ç^űX_˙ť†ŽąP«^zJď˝üŚÓąěülťţú𭲄FčOďlŐ“Ë·¨(ď„ŢüË˙čhň^ůXaDNć1=ľ`ş¶®[sď{L_@-~[YÇé™{ŻUňžÖżeęČyäŞ07KŁv}ľ^×Ü|żž{w§&NżV[Ţ[¦uËţęŘöÝţIKźýŤ˘‡ŽÔ3ď|ĄçţłSˇQµzÉÓúŕµçZ­yWŐńĐ]úßŰŻRMuµ~˙âZĽfŹŹŻw˙ąHďżöçVϡ(/[EyŮňö t„»2Ua-U`˙úŕőżčg<Ą'–n’››»Ţ{ĺYĄîÝŃęu)Ě=ˇ˘Ľlůř)8,ŞC×*4rÜÜÝ•wěbĚ’¤jĺĘ•čÁ$ WÜx‡$éo˙ýs˝÷ĘłĘh¤đtŽ$ŰL'¬Ş¬ĐKřĄęëëő›çßÖQçËh2+zč(Ýô_O޶¦ZĽţÇţ‡~$Ą˙ř˝¦\=W¦%ČÓh’Éě­Ý_~$łŹżn}čY…F’—·Ź®]ř$)ń“wÇ¨Ş¬Đż?"ßŔ`Ýţčß< JţAýőóß=§›ŢW}}˝bâěó7ÔÖÔčĄ?ܡĽ¬tÝ·č5ť7u¦ĽĽ}5hřxÍż˙ űů5<üźN{ĎK:9„ÁhŇőwý^ŃCGĘěă§Ů·ýZ’ôÍ擦[Ţ[*IşîÎßËÇ?H~Áš˙«'döőw N­yWŐ±¦¦ZŻ>yż|üt×ă/j@ôůřꆻ•ÉěŁCľmőŽž2ˇdGď…ôCß;‚…îzDA!á ‰Ń¸)W6„»[˝.-MhŮ޶{ŚęŁÚÚĺťHwş·6­~M^>~şrî/;|˙´v] ł‚5kÖ,EŹľRń—ęüY)|ŘĹr÷4)öÂyެ®§H@Ĺ hŬ[˙K’´ţŤżiÝňżjÝňż*,f¨.ľfžfÜx»ÓđI:šě~ Ť¬´ÓđşsËZKÝŻřó¦(něENÇ2ę|űĂöI­Ö¨˝çeoź}hä+Ż—Ź_ ăőŔ0Iö^ŤŚ^ŢŞ°–ęÇ»4ú˘i’¤zńă”VkŢUuÜţé{ĘÉ<Ş9?@žFÓÉsí?@K6jűŻŤéÔ˝ĐŢLąz®ĽýNnÜŻź$ÉÍ­őJNNh9¶S×*,&VŮ釕ť~ŘŃ›ćËu勤(_ ’Ů×_Ű?}ŻC÷Ok×Î$HHHPéÖ““YĆN¸N1c®R^…—>řĆŞ11 Ź4P, ‡ˇ‡´˘_ż~š}ŰŻµř$ý⑿j̤éĘÉ8˘ŐKžÖŁ7_˘ÂÜ­>îţňcIŇI3šŰĂ`˙Ũ¦şŞÉ›}˙‹Żi{ şş:Ç·×ýĂcŻďßń™$)nÜEÍ?ÓÓÓé!ą±GBKK—zűÚ‚ëęë:TłÓť—ý!Űţ@:bÂ%NŻ×TŮkŕi0:^›yÓÝ’¤żýî6˝ňäýJIjyŤ–‰3Żăž­ök7tě…mٵsčě˝p¤áńă&9m›—e_Ún@ô6‰˝Ž‡˙Î\«°ˇ’¤ěŚ#’ě˝i>Y±D^>~şjŢťťş:r]ŕLyí˝Żjęúi÷‘jmÜ[ˇ‚˛Z ô ô€v0űřię¬ůš:kľ sOčőgÔźiéłżŃož»ŮiăJ ÇRíŕQ±#›ł1ĚěÖě}řůS›mźôőF}ţÁrKÝŻŇÂ|ŐŐש_Ă·ĺQCF4;FäŕřfÇ(ĘĎqzHn|č]ńŹ'´âO´Řöŕ‘­Ö¦˝çe můÜ sł$IA!áŽ×®š·,"µvébműä]műä]Ĺź7Y÷<ůŠă[ý–jŢUulýš˘‡Ž”ŃdÖ^~Fëßř›˘bO>L6~ăátŚ’Â<dgĘ7Ŕ˘ŕQNç÷ĎM‡eô2w¸9ŻŇ˘|4 Éđ p®ÇńŁ©->d_pŮl]pŮlĄîݡwţţ’w'ęő?= žű÷ikŢUu,noüBZ­AKçĐ8ˇeKµnϽиż_P§˘07K%ą  ËéĎ«±ľYŽ´ÝÄŘ{Häd“$}ôÖ‹Žą#uäţiíş€+ĄŻŃšK•“v@{÷îŐěŮł5mÚ4y{3ź ĐĘ˝ĽtĽ´Tiiim~ Ů€üóń»őë9Ł•{<­Ĺ÷Ť^ö_Z»ÚK-w‘÷ňö“äܿѾDű2‡ç_zŤ¤“ß·ôÍ÷Gď,‘$Ýń‡żkčč Žą!Ř%IŠzň[÷şZ{ywOçĺ+w}ľŢ~ü&ççáŮpţý:W§ŽśWcďIrssţĎOăę#'^ÚâçÄŤ˝P˙őěrIŇw»¶¶Z󮪣ÉŰ·]5hé{LÄtň^p 9e¸ĹŃ䆶 ëxÝHŘ{†äźH×ţí[”ńăşâ†;śćłčČýÓÚuW3ůźě‘·~ýz=ýôÓJNN¦0@ČŚĐG‡Ú·Ę ´ŔĂÓ ’‚\mßř^‹ďďŢú‘$iÔ„“Đé +4í’?â‚‹%©ŮŇ™ůŮ™úfóZE Ž×ySŻvz`Ź6¶ŮççgK’ú‡źL™Óp<ČGîx=$r $)7ó¨ăµJ[ą>~{Ił‡Ü!#Ďo8ÖwÎČŰ6ë÷7MŃÚe/´Z§Žś×±† -%ű* ŤŞ«*őÍć5ňö Đů—Î’$­zé)ý÷Ü‹='$©ľa.˙Ŕţ­ÖĽ«ę9Äţçś&u<¸ď=0FËźű]»ÎaP“@ 3÷ÂŔSÚŕh[Ü6B’ć+lt¤í’}R@p¨ 
słôń;Käĺí«+ćŽčĚýÓÚuW8v¦Ć_ýůő,I*((Đ /Ľ —^zIV«µ×´Ăl6+66Vć’yÔÖqaŃëH@ ţFľ­]ö‚>~g‰ňOd¨şŞRůŮ™Úôîëú÷óż—%4Rsď{¬Ůľ%ą˛•Ű™™{ďc2™˝µâOčhň^UUVčČIúŰďn“Éě­{źzEnîî lŤßŞ7˙yXĂÄ†ź˝ż\U•:đÍç:üýą»7ywůO~.IúĎËĎŞ´(_ÇŹ¦ęíţ ߀ ű/aM’zÇďäáiĐ[/<ŞôCß«˛˘\»·~¬WžĽ_ů'2ÝÂ|ť=ŻĆoČ'NKĐŠż?®ÜăÄ'n IDATi*.ČŐ›Ď˙Ź sł4ďţÇO.˙X_ŻśĚŁzç8?GeĹZ˝äiIjöP|jÍ»ŞŽłną_’´nŮ ˛–éXę~˝ţ̲–iüĹW¶óNťşâÇśRĂÓ·Í9h>ÉfGÚŢ(,f¨*¬Ąú~×V͸ѹwDGíó3ŽěŮĆ˝Oü–;.Žéh”‘¶HHTË>›LŇŽqžšńQaŢť"JHHGŇ’„„Ůą†Ľ?ëW̸;Ö±Tcµ­źV­J1ü¶{$i–Ă»˙Í‘=Ű ďĎ­“§) "Ň%řE0lÂ+Äüř)vź6đĺţB Š47Č•˘e?ED®!ŢţÝÉÉ8Ç˙Íy‘G^ś‹›»™Ý[Ö˛aů"ÂŢČčźNU¤QeeĄX“ńţ¬_ańôá©Yďc0¸^×fłŮHMMm˛ś»»{ŁË”%%%5«˝¨¨¨÷edd™™Ůdţţţ\ŃëIIIˇ°Ďž‡„„4¸,b[\O{Ƕ±ëiďŘ^é{Ąą±mŹ{Ąą±mÉő4§ľ–r÷ô =Ż‚u{ěô 1Ó˨_"JHHC&<ţ"Ţ~$¬_ÉoIyY9݂ø󡧹ăţ§p5ş)HҨ—ď˙!YÎ;śź‹îÁ×]×µwď^.\ج˛M 1ť;wnłęiě’„„Ö­[×dwŢy'ăÇŹŻwźŐju SoÍő|ôŃG;v¬ÉzfĚAttô»žöŽmc×Óޱ˝Ň÷JscŰ÷JscŰV÷J[(«pâű”R¬e ék¤$ď GŹ%>>^ż4D”‘jÎÎÎÜ~ßÜ~ß †\–?¬ŘyM^—Ýn'°G0çĎĄ5Y6=·‚e˙ný˛ŤŐq*Ą´Yu|źRJ~ődź-jVM]Oznó†Ło űÄ ĘŠŠˇ˛’R»ťó˛íťwřú­?qŰmuęýę«x…„}âË'M&áOâěľ}çćRQVNqn.g÷îeűÜwůhňdrNźĆ§W/~ôĘ+zÚFH\¦€°ĽűrúŔÎ$mĂËż'Aˇ ĚdňńŕBbbŁĺ˙ë_śÜ˛…ÂěěZŰ  ×Í7Sjł±î×3Č?s¦ŃzňRÓřě×3řáôu Ýú÷çÂáĂz!Ú€FH´‚«›…đ'0pĚÇü„Oľµqü\©s…䥦ĐďîńŤ¬¬¬“ŚĽc,‡ţőŻ&“Őr­VŽ|ň QŹ—ÖSBBDDDDD¤ řEŕéBY…;“Křr!E L;°|97Ϝɭo˝EXÜ\ŚĆftqňŃ“[ľjQ»Ç7m®uĽ´žŮ‘kŢöíŰŮúďrl• şő)DDÚEz^źî*d`+Q=]9ÖŠŮlÖ¤—­tä“O0űű1ôż oüúĆŹˇ˘¬Ś ‰‰śŰ·ź´Ý»Iݵ‹ňâúWVň  ëřńµ[]ŢłG˝mD ąćeffrúä1BD®ŠďSJ9r*‡=ź- 7'‹»îş‹qăĆ)0­°çýż“´î3®źđS®»ĺüúô!pŔ `ĐS(+,äȧ«ŮµhĹůůµŽu5[(-,lQ›ĄöŞeU]-z‘ĚąŁG9±|9“'On´¬""""""WŘąÔăäćd°víZxřá‡‰ŽŽVp.SÁąs|ó×|ó׸y{4h= "ä¦éÖŻ'O˘×Í7óĎGĄ03łVbÁÍË 7OOŠrršÝžŃŁ*Qj+PđawwÇn+ŔĂjm˛¬ćąÂÂb<öY,>Adeeńî»ď˛zőęË®344”3fĐ') ł˝°KÇ·87—S_ÍÎyóřřÁ‡Xqßýd&'ăĚđ§Y«lŢʼn,»µ0äYu|Zšnč6˘„„H;đôˇ˙-?ÇÍâëضnݺˮĎb±ŤW C…&Ϭ)39™/_ţ-׍Ykßą}űżőÇ-ŞłĎŹFpć»˝ pQBBDDDDD¤d¤ŕ»ĎćPl«ZŠŇd21}út¦ÂâFpÓOŕîďßdŮę‘ 7·ZŰ“.&"︀¨¨fµëJôřŞeF×¬Ń ŃF”ifO_ĘK‹6lłgĎf°–l‘!?˙9Cý9Ă~ŮôŠI·Ţ Ô]Mă‘#śÚú5ÎcçÎÁ?<ĽŃz<{öäŽwßĹ`2qôłĎZĽ:‡4L“ZŠ\a˝»»0dXýýÂßß_“Y^¦o,dÜ_ç=nŢ!!\±’sßŔž™••||đ Ąo|<×ßsű?ř N=_ÍšĹOűôĆ;4”źýß˙räÓŐ߸‘ĚcÉç`ôđŔ·÷uô5Š?ű“‰Ś¤$ţýÇ·ő"´!%$DDDDDD®ł†GąŃçŞë§ ´BÚîÝlţŻrËË/4x0AŤŚ0©(+cÇ{&yý†:ű ł˛řףŹ1ćµW >śëv×˙ěžë:öĺ—|=ű÷”ÔżÂĆ»w5zŢ †Ţ¨ŻJH´‘ÂüL F®n„tť›‚ŇĆŽ~ţ9ÖożĄ˙Oî&ä?Ŕ·W/ÜĽĽŔɉ’‚rNźćĚž=Y˝šĽÔ†WÄ(ĚĘbíÓĎrÓMDÜ~=bbđ Âĺâś™ÉÉśůn/IkÖp!1Qż”‘kŢ#čŮ—ďN”*"rĹś>°Ô#[ą.úFž|ě~Ţ NÓ#""""""-tîř·ßý‰cO“Éĸq㏏Wp:9ëÎť\ą’÷ŢKȰpď°8ö鍶Ą„„H3ćgrtÇrrÓŹ;¶ĹÄÄ0iŇ$ kÄż˙đG˛Žăú{ďĹ',Śň’RrSN+0mL éR‰ô“»›,g4űţ÷ź=ö %öś&ëés[ű˛Ď&“wáD“uxuëÓčܧlh·ëéŢ{(îžţ­Šm[\O[Ŷ-®§=bŰž÷JS±mĎ{Ą©Ř^z=.®&BúÝrĺ;PF¶śŞ•|}}™4iYzR:ŻC«ţɡU˙T ”iŁÁ SeVł:eţA}ąaXäžŢEć٦;Coľ˝Á}é‰Ç9}ŕ‹&ëz+ݢ"Ü˙u;^OŘu}čćU˙7ŔéyÍ‹m[\O[Ŷ-®§]bŰŽ÷JS±mĎ{Ą©ŘÖw=ŢÝűâérĺ:OΕü0ÖŹĆ‘‘‘Á¸qă4W„""""Ťóópah_˙nFY_‹3·27¸˙Îd6ŁžĆę(:íJsVµďčÚh=«Úńz†ö5]=‰nÍ‹m[\O[Ŷ-®§=bŰž÷JS±mĎ{Ą©ŘÖw=e%Wne‹Č 1˝Ś¸ą:¦y"DÚ„SeeeĄÂ "ťŮ´iÓŽEŘŕŃ H’˛w3iľ`ѢE H”Č»ďľ ŔŔ1O´ŮňľĄĹ6\Ý,x›ťÚ×HC‡ľö>IIxŘtCČU—éçGŔđá ĽőÇÄĹĹ5ZV#$DDDDDD.ĘĎLĺÄžOqu3ńŔÔéô 1*("-éď‡őÜYň”iJi±Ť3IŰjÍoQtá„tÜ +¸óÎ;Ůýß˙Ť[IéU?ź'v荒­˛Ľśâü|ňĎžĺÜţ$oŘŔůďżď°1­ľ-ďŮ>śéʲĎ&óÝgsk%#îĽóN"##;ôy0~üx‚Ξí¤¤CžŁ“‹ &şőëÇŔÉ“°řďŚ_¸ŹmÖĆŔIëM†Hǧ"""Ňi•Řr‘.Îl6NznŁ{‹Ž--¶qtÇGd¦tl gҤI„……)¸—©ćč'Ü<=ń§÷¨[7ŽŕˇC™°x1˙śúçĎ·ş˝nýú)čť”"""Ňiĺg¬`tQaaaĚś9“e˙nŮ„Ž©G¶rúŔĘK‹0™LŚ7Žx­ Ń¦*ËË)ĘÉ!m÷nŇvďfß?ţÁŘ9sŠâÇłgńŻGkuݢ•謔‘N«¬¤Ş#áďďŻ`Hł•ŰH;˛Ő‘Ś6l'NÄb±(8WXÁąó¬}ćWÜżęcz Dؤ$$Ô*cňńađÔ‡é‡gPÎöŚLÎ|·‡ď/!űäIO}ażüĄă¸ęÇ6ľ|éeŽ}ńEłëiHČM7qĂ#S ŠÂŕćF®5•¤Ď>ăŔ˨(+ońůÖ8p 1÷ßGŹ1ýý(+.&˙Ü9NnŮÂ÷­ (§îŔŔôŔĹĆbňń¦8/Źóßd˙pfĎ%$DDDDÚKQ~6…9UC}Ł˘˘i¶ë‚<ňčT–ýc)S§N%::ZAiG…™™|żü#†<ö(}FŹ®•đ dÂâżcéŢ˝Ö1=‰ĽăúŚĂ'Ź?΅ÇmŁ5őT”•sÝČ‘Üţöqrqql÷ ďËđgž&pŔőlxáż.»ť>ŁGsëďgתŰčęŠx8ţááôűÉOXőđTlééŽýQwÝĹŹ~÷ŰZǸűůqÝ-#ąnäů÷ŰďppĹ %$DDDDÚC–őăçŽ>ńśt f# ŹrهŹčÇ[o˝Ą \%'·neČcŹűĐ_<†Ą{wŇbŰś9d;@@d$7?˙ÝúőcŘSO±ć©§Ř»d){—,­weŚQż}ąŮőÔQYÁ_É‘O?e˙—šŠ›§cÇ2âWĎĐgôhzÝÇémŰ[tľŐ~đä8ą¸°wÉR­Z…í &7‚ßŔ_‰gP?xň 6żú^!ÁÜňŇ‹|·x1G>]Ť-=s@·ÝĆŤÓ'îŮgIýćrNźîT÷VŮ‘N)ďlŐX___M>'"MjŕŽĚ“rŐ?ĂSSp÷÷«µÝ+$„R›ŤMŻţ?Î˙=e……”rn˙~¶Ľţ4YkęqvuĺÜlťý{rNť¦˘¬śÂěl|đűţďDÜ>ö˛Űńş8ďўŋÉ?{–О2J 
lśţ÷żůâĹ)ÎËĂđź{÷މ¸Ť|»pßĚ˙+y©©”—”ć ß-^Ěî˙ţoś .ô˙éO;Ý} wŁt:EůŮd§&{É·k""P5ie¦ő ń?ű%CűşáçárÍ]cbb"ďľű. ą>IIxŘ:Íą—ÚpuŻ˝2ĘęéO4xLÖ‰UÇÍMÖßÚz®ü¸ŢíÇ6~Éŕ©Ó˝żËn'űÔ)ü#"őňKlźű.öŚ Çľô‡řűč1µĘ‡Ü8€¤uëęmçčçëąé‰'č9ä†ńÚúgfIĚJHȵçÔ®Ď?kF|©)?3•¤bĎ9 €ó…üés˘Łqóô 87·Î>“Ź7îťHČM7⻯/ÎC­ůšŁ5őd&'×»=7Ĺ PkCKŰŮüÚkŚ˙ë_ żőVúĆÇ“~či»v‘úí.ÎěÝKeyy­ňž={đĐgë=gŻŽ‘ČĘbHϞܧ„„\[rÎś ŰšTÍŚPăŹBézRRRX±b鹌îd¦těóőő%¤tҤ¶€‹“çź;W§óýÓ÷˙K·n­Şżµő”Úíőn/+ŞZ™ĹŕćvŮíd$&ńá˝t˙ýôŤCŕŔČ ?˙9öŚLľ]¸€#ź|ę(é(’†›1r¤ŁQBBDDD:ŤŇ"»ct„Édbܸq ŠHg·ŰIľäŰl“ÉÄ1c?~ĽÔAő3 Îr•ĂźyK·näź=Ë7óçsvß~Šrs©(-Ą˘Ľś'v}۬ú[[Ź‹›e……u;Đ&SŐďŁÂ˘VµS™ÉÎyóŘ9oޡˇ„Nßř1ôĽáFýö·¸¸9¸rĺŶ 1zxđţ¨QRPpMÝJHH§qj×zÇRźcĆŚŃč©#<<śGyDź_ßľDßuP5˙AMŐó%¬}úirNŐ^1ÂŁGŹf·ŃÚzüúô!ýС:Ű}ző ŕüą6;ß\«•\«•+VĐ˙§?ĺ–—_bĐ”ű ‰\«•nýúás]/Ňş¦î­˛!"""ťBĘŢÍdśŘçčpč›OŔd2a2™>}:3gÎT2˘óéŐ‹±sćŕěęĘńŤëĚŐŕb4`KżPçŘ˙TVVud uż[Ż9gCkę¸~„z·‡ßv+€#Yq9íÄżůŻ˙śî®ŻsLň_Ôz$ő›ŞQ±ů¤‚""Ž„Ä{ď˝Ç{ď˝ÇŕÁČŐb!pŔFüú×ÜűŹ˙Ă+$\«•­łf×)›}şj”Á°_>…ÉÇŁ‘î×_Ďío˙ŁĹĽ´4 ę‘gWWŕ?ó:ôŤłÁ€»źßeŐS­˘¬śŕ‡2â׿Ʒwo &7<‚4ĺ~ÝwIë>»ěó'ĚüxÖ,z݇ŃĂ'<‚ţôÓd;ć8źC«VQVTDßř1Äżůޡˇ8 üąţg÷pŰ[żÇ;, هG§»7ś*+/¦lDD:©iÓ¦3аÁŁ‘kHŐśë##L&Ď=÷aaa ŽtyŽe?ˇC,űůÄî]Í*—¶k_ĽřE99uöEÜ~;ńoľQg{Áąóüóç?gŘÓż$rěXÇöCoäîE‹ę,yąń·żkq=ΦíÜIIA_ľô2cçÎĹŮPwĄŚÄ5kŘňÚë—}ľžAAÜłt î~~őƧĽ¸µO?Ă™ďľslëOü›o48˘ăBb"«§O§¤,ý:ä±Ç¸iú´&Ëi érÎśŕÔ®ĎsF(!"Ň9•cĎĚŕÜţýý|=Ö;,›Ľ~=&o/Nš„gĎžŘ32Iýöv-ú¶ôtv-úľ˝{ăß·Żc…Ž­żźÍ¨—_¦[ż~T”•“sęÔeŐăb¬Z9Ł´°”„Ö<ů$C}”ný˘1¸ą‘“’‘O?ĺŕŠ•­:ßüłgYq˙b&Oćş[FâŃ˝;.F#¶Ś ÎěŮĂŢ˙ý?˛Oś¨—ă7’}ň$±>@đСýý©(+#űÔ)Žmř‚}DEii‡x˝ŹF„łoďwl}űmfÎśŮhYŤ‘NO#$D®-EůŮX÷mqŚŠ€Ş9#ž|ňI,‹$"rQG!!U »—áááM&$4BBDDD®ş˘ülňÎź"+ĺŮÖDÇv-Ý'"Ұččh-ZÄ‚ˇ7*Ň))!!"׌Ľó§°îۢ@tEůŮŰr(+.t<–QÓ°aĂ7nśfËąF)!!"׌üó§Č?JéÄ|}}‰ŤŤ%>>^‰‘kś"Ňé“vqI%é|ď_BCC‰ŤŤŐ„•"""]"Ňé˝ňĘ+ ‚H'ă¬H{Ó ‘HLLdÁ‚Ő»˙Î;ďltEęejÓśe˛DDDD:;%$DD®q7ndßľ}WĄmłŮĚĂ?ŚĹbąfâyôčŃ“ÇŇ ůüű¬Vµ‘e+sÔ‘“žĆ÷ŰÖág10qâDͱ "")))¬X±‚3§¦b.,TPäŞ3âDhhh“e•ąĆ­\ąň޶oµZ‰ŽŽľ&c3ŞÎ6gź0Îç—¶čKąyř8ęČÉ. óĚI2»Ý®ZDDěv;ÉÉÉŕĺI™‹žĆ—Ž!$5Ť!·Źĺ¦É“›,«„„Ha4{ăćáÓníĺ§źľćc6xt»#"""r-RBBD¤‹č>¸];Ă;–^›«źDFFRP\αô"ÝT""""­ „„H DGGăÔ›’C9 †H+čA#iwJHH»SBBDDDDDDDÚť"""""""Ň&RC‚Ywô(Ë—/o˛¬&µ‘6awwÇn+ŔĂjm˛¬"""ĹŻý~<•ŰřŞ€Chh(3fĚŕÓéÓ1Ű ét”iŐ«WłnÝ:†?üúoĎŐdƧg˘Ł|©Ĺb±Í–›‚!ť’ć‘v§„„´;%$DDDDDDD¤Ý)!!"""""""íN iwJHt`EůŮX÷maÍš5ddd( ""â`łŮHJJ"ßĂ2guí¤óŃ]+""Ňĺg“ş k×®UBBDDj±Z­Ěť;—ăQ‘ŘÍî t!©iÜŢ7śI“&5YV ićÂB‚˝Ľ k˛¬Aái>Âz÷%ŰV¦`´‚"""-GźCŮp(GÁi=˛!"""""""íN iwJHH»SBBDDDDDDDÚ„ÝÝť3ůů¤¤¤4YV i©!Á|~,™Ź>ú¨É˛ZeCDD¤3yú3ŠŘP  8ÍfÂĂĂ9óÝwĘ+ét”iíŰ·łu[Y¶2Üţó+ŢžÉÓ—°ÁŁ?"P … IDATÁ‘ZÂÂÂ9s& †Ţ¨`H§¤„„H dffrúÄ1BDDD¤•4‡„´;%$DDDDDDD¤Ý)!!"""""""íN iwJHH»SBBDD¤Ë9s‚K_aÚ´i$&&* ""â’’Â;ďĽCrDvwwD:˙Ě,b{0bÄ&ËjŮO‘NČn·“śś ^ž”ąč»féüł˛Ňł'7ĹĹ5YVw­´;Ťi#FŕÜ›]'  ‘VPBBDD¤(uő$ą8GÁi=˛!"""""""íN iwJHH›(sn~šA ‘NČl6;~.5ş) rŐ•9;spp,ďďýŽíŰ·7Y^ ‘N(,, ___\ťŐ­“Ž!+ Ŕńshhh“ĺµĘ†H dddp2ĺąg đę}ĹŰóéهáżÎÔ ľÔ1uęTşy{łî±_`S8ä*˛»»s.¨ÁÁÁ„……5yŚRi"""-ŔŇEó8üĹbCDD®şččhü‚¸ůůç ąjěî Â`Ŕd21uęÔf§„„HçääTç?Á@·nÝ:t(Ď<ó ;vě¸"mőŐWôďߣŃH÷îÝŻÚµ‹tv}~ô#~öŹ˙ĂŐbV0¤]ĺyXÉ€‰'6kt(!!"ő(//'##={ö0oŢ«ŐÚ&íĄĄĄ‘žžŔÖ­[),,ěqŘłgŹné´şEG3ńĂ0iÉśîÖ˘eEš+×Ç‡Ś‹É/“ÉÄôéÓ[”Ś%$D¤ŐŹlŚ=š÷Ţ{ŹË™3g˙üó:evîÜÉ˝÷ŢKPPFŁ‘ŔŔ@îľűnľúę«:e‡ Fż~ý˙®o.‡ .đüóĎÓż, FŁ‘°°0|đA>\ďy65'Dcűßzë-śśś8xđ`­˛Ë—/× """ťNhh(111T śëÄÁÁ±śî†ÝÝ]’ËND yě1Xł†›¦Oăąçžă©§žÂb±\V}JHHłôčѧź~€U«VŐÚ·dÉnľůf>ţřcÎť;çxäcőęŐŚ=šżüĺ/-jËjµ2xđ`ć̙Ñ#G°Űí”––bµZůÇ?ţÁСCٵk—^éŠňł±îŰš5kČČČP@DD¤Yxę©§1cáááŽíŮíߏÄč(Ň»uÓ¨ iPŽ·©!ÁXk%"L^^UŰ/3ˇ„„´ŘÝwß Ŕ¶mŰŰŽ?ÎôéÓxńĹINN¦°°'N0kÖ, 3fĚ ))ÉqĚÎť;©¬¬tü»zîŠjŻżţ:iiiÜtÓM$$$źźO~~>۶mcČ!ňŇK/µéµýć7ż©÷śÚę‘Ö$$R÷oaíÚµJHH‹EGG3sćLfĚÁ°aĂţóűĹbáLX(%nn ’ÔJBśîĆA1ś '#0ŢĎ<ÍCëÖÖJD´B."ÍŐ·o_Îź?ďŘ6ţ|Š‹‹™5kV­$AďŢ˝y饗¨¨¨ŕwżűűŰß3gNłÚ9~ü8žžž,]ş”ččhÇö¸¸8Ţ˙}bccŮąs§^‘$&˘ŁŁ7nűöícăĆŤ<ôß#qÍZN~ő¶ăI×Pl4RŕáA®ŹžŽŐ2Ş™L& ={^±ö•‘fóđđŔfł9¶mŢĽąę—ŮCŐ{Ě”)SřÝď~ÇÖ­[›ÝNuťőąţúë(((Đ "WEdd$ĹĺK/R0DD¤Ó  >>žřřx222 [t4?śů<I\ł–3{öuě©!Á”ÝđČĎÇŁ sYMÚF¦źÖŢ×Ő»/&&†#F\ÖD•JHČ‘ťť €źźźcŰ©S§€Ş‰“süřńµ•‘‘ÁüůóŮ´iV«• 
.PZZJYY™^ąŞ˘ŁŁńęMɡCDD:µ€€€Z˙îM·‹ŁS‹ňňůâ‹””•‘çë€sYůU‰ K~>îv;†Š ˛ę5ň‡DDFbÝ»¨ Kll,‘‘‘­žB is{/~`………9¶5w¤B~~~łŰ9yň$7ß|3gÎśQĐEDDD®‚rn:”¤¤$Ç—Ryľ>AUY›ŤčÄ$¬Č÷đ Äh¤ĐěNˇ»›—'=­©ôvr" *’ŕ!CŚ"řơŽcě«W[ëďűö¤„„4[őęŁFŤrlóđđ 77—ěěl|||Ú¤ť^x3gÎĐ«W/fĎžM\\ţţţŤF ...-®ł¤¤D/ H3Y,yä jäjRRGŹ­• °tď΀A±d=ĘŮ‹_^Ő”é燱¤—ňr=ňцň<,Ř=˝Ş’nFJś4čÎ;xč©§¬güřńWő:”‘f9xđ K—.Şć…¨Ξ={HJJâ?řA›´U=‡ÄúőëkMj púôéŹsrr˘˛˛’˘˘"L&S­}‰‰‰zEDDD.C@@ÄĹĹUó‰Y­VŽ=ŠŮlć‡ńńޞÉ?{–ڤŁdMât~>Ů5–u-.ĆX\‚{ˇCy&» —ň =ţQCţĹyŰ‹‰»Ż/'şwo°ŽđđpBCC‰ŠŠęĐת„„4)))‰»ďľ›’’î˝÷^ äŘĎž={xçťwXąreťcׯ_Ď3Ď<ĂĉyóÍ7›Ő^qq1ÁÁÁuö˝öÚkŽÄCII FŁŃ±ĎÓÓ“ĽĽ<öîÝËđáĂk÷Ç?ţ±E×\VV†Á ŹH‘KY,ÇŞ—Şž‡˘ĎŹ~Ŕ§Ó¦ŐÚ_ęćF©›6/ĎZŰc÷|×`{y*\\p)ŻęśwöŃŐŁj&ęĺ0¸˘’€d$nžž‚ŃÓnŃŃdddđňË/Śżż?ˇˇˇŽ˙.ťD é|”yy>|•+W˛hŃ"l6ááá,Z´¨VąéÓ§3oŢ<>ţřc¦L™Â«ŻľJŻ^˝ČĚĚä“O>áżţëżČĎĎ'77·ŮmGEEńÝwßńâ‹/ňꫯâááÁxë­·pvv¦Oź>?~śU«V1aÂÜ.~x8íŰ·óěłĎ˛hŃ"˘˘˘¸páż˙ýďٱcľľľµ†ÖÇl6c·ŰYąr%÷ÜsŮŮŮꆹ sçÎĹjµ’™™Iff&V«»ÝNjj*EE˙Y±jʧźć,i{öPśźOćŃŁä§śťlĂą¬ w{!gĎŕU`«·L±ŃH¶ż“çëZ\ŚVVűÓ»w§˘ĆăĂvwwĘ/yś¸ÜĹąÁy5\-fś""9×ČőTë9¨ówygäTYYY©·‰H×ĺääÔ¬rŁGŹfůňĺtëÖ­Îľ+VđŔPZZZď±7Üp›7oĆŰۻ޶/ýZ¶l<đ@ťzBCCIHHŕ7żů Ë–-slŻ>~ůňĺÜwß}uŽ3Ť¬[·ŽGy„ÔÔT***prrŞ·ýQŁFŐY˘´łLN»řÍDpĚ(ÂŹn·vw,}€3fÔű Jgv6· í´ĘFi‘[Ö9nŕKhhh»Íz-""ŇRRR°ŰíŤţ­0ţ|8Đd]·÷ 'ŘËë?I‹ €ŚĘJvą87Y‡oEĂhřďăťTÖzĄ!Ż<ö ÇĎţQ‘jś×ęŐ«Y·n&“‰G‚Á˙bÂ$22Ňń·ďµţ{_#$D¤^îîîôčѸ¸8¦L™Âí·ßŢ`ى'ŇżŢyç¶lŮÂąsçpuu%::šÉ“'óôÓO;F14Ç”)SČĘĘbŢĽyś:uŠ=zĎkŻ˝FHHŻľú*GŽáŕÁµfžś´´´zëtwwgëÖ­Üx㍵¶˙éOâŮgźĄľŹ•!C†°mŰ6L&S­sę”â‰'X¸p!qqqlذ‹ĹÂ’%Kxě±Çę='''ţüç?óË_ţ˛Öö’’ÜÜÜčÝ»7űŰ߸ăŽ;ęMľ¬\ą’źýěg<ňČ#,Y˛¤NWWWVŻ^Íرcë=çŽlÚ´iÇŚ"lđčvkwÇŇW1cŃŃŃ×Ôűęlnq»­˛Qy†Sß~N —+“&M",,Ll"""rMĐ ‘®đFwvćé§źćŃGĺČ‘#”””pţüyćÎť‹‹‹ ˙üç?Y·nťŁü믿NZZ7Ýt äç瓟źĎ¶mŰ2d………ĽôŇKµÚ8tčĎ?˙<ůóç“‘‘ÍfcëÖ­DGGłgĎŢ|óÍ&Ďő•W^aáÂ…ÜxăŤ|öŮgX,Ž?ÎôéÓxńĹINN¦°°'N0kÖ, 3fĚ ))©NŞFJ<ţřăüâż %%…ŇŇRľ˙ţ{nşé&ţň—ż°eË–,Y‚‹‹ ďĽóçĎźÇfł±˙~¦M›ĆôéÓqvÖÇfW—ČW_~Žuß–viĎĂż'Ć>ĘĚ™3•Ś%$D¤s)))ařđá,Z´ččh\]]éŢ˝;3fĚŕů矨őčÂńăÇńôôdéŇĄ ><<<‹‹ăý÷ߪź¨iÁ‚”——ó›ßü†'ź|Ěf3#GŽäĂ?Äl67ůŘĂĽyóxăŤ7‰‰aÆ xyy0ţ|Š‹‹yýő×™={6áááL&z÷îÍK/˝Ä+ŻĽBYYűŰßjŐW=#33“‘#G2ţ|BCC1 0€ůóç°˙~ţ÷˙€gź}–çž{ŽîÝ»c6›‰‰‰aŢĽyŚ9’ŠŠ ÝP]ÜŃŁGůęËĎIÝżEÁQBBDšňä“OÖ»}âĉě޽۱móćÍäĺĺŐ;Ěľz>†‚‚‚ZŰ·nÝ Ŕ˝÷Ţ[çŘŘXl6[s=|řá‡üęWż"::š/żü__ßZçđĐCŐ{ě”)SjťC}žy景%77€oľů€űŢ:ŞGiHë‘®!&&¦Ţíś={¶ÖöŚŚ ćϟϦM›°Z­\¸pŇŇRĘĘĘę­çäÉ“ôíŰ·Ĺç¶aĂ~řa\\\Xż~=Ý»wݵ˙Ô©S„††6ZĎńăÇÜYg›»»;đźů ¬Vk­\jŔ€ş‘DDDDDÚ"]„§§g˝ŰÍf3………µ’ 7ß|3gÎśivýŐÇ»ąąµřÜîąçÇd“ ,ŕ­·ŢŞµ˙ŇŃ ÉĎĎopꇇG“ÇŰíöZ1ą”ĹbŃŤ$""""ŇFôȆHQ3áP_'Ľfgű…^ŕĚ™3ôęŐ‹eË–qęÔ)ňóó)..®w•‹šÇgff¶řÜů裏pssăí·ßfÓ¦Mő&˛łł©¬¬lđż†Fo4Wő ĹŞúŃi=%$DşĂ‡×»˝zeŠšŹCTĎٰ~ýzîż˙~zőę…‡‡FŁŃńXĂĄz÷î T­@ĐR;wîdâĉüţ÷ż§˘˘‚|ŚŚ ÇţđđđZçzĄôěŮ€'NԻ߾}ş‘DDDDDÚ"]ĢE‹ęÝľ|ůrÇĹĹĹ×)˙ÚkŻ9VŻ())ql9r$K—.­sĚţýűqwwgřđáőžC·nÝřőŻÍm·ÝĆŮłgyä‘Gűăăăxçťwę=~ýúőDFFňŰßţ¶U12d+V¬¨w˙Â… u#I»+Č<ĂÁőçťwŢ!%%E‘k†"]€Á``óćÍ<÷Üs>|»ÝΩS§;w.úÓź€Ú+XDEEđâ‹/’‘‘AQQß~ű-&L //Ź>}ú°jŐ*GňâńÇÇŮٙŋ3{öl.\¸€ÝngëÖ­Üwß}1bÄFĎÓÉɉ%K–Đ­[7Ö®]Ë{ď˝T­na6›ůř㏙2e ÉÉÉ”””pöěY,XŔĉINNnő#“'OŕřůË_ČČČŔn·sŕŔ¦OźÎ®]».kŽ ‘Ö(+."˙ü)’““ŹX‰\ ś*«§—‘kNii)FŁooo>üđCĆŹ_ď< S§NeńâĹŽ/[¶Śx NąĐĐPřÍo~òeËŰ«?FŢ~űm^xá…zĎĄ_ż~l۶ ??żZ šÇW[»v-ăĆŤĂÍÍŤť;wËŠ+xŕ“_^ę†n`óćÍx{{×IrÔ×FCűďşë.Ö­[W§śÁ`ŕ“O>aĘ”)äććRQQá8¶Ł›6mÁ1Ł<şÝÚݱôfĚQﲝŐęŐ«÷Čđ‡_żâíĺś9Á‘/—\“±‘®M#$D®aEEE@Ő„“cÇŽeăĆŤÄÇÇăëë‹Éd"&&†÷Ţ{Ź˙ůź˙©uÜ”)Sřóź˙LDD®®®„††ňČ#ʰ}űvBBBxőŐWąá†0ŤŽůfÎśÉçźέ·ŢŠŻŻ/®®®ôéÓ‡^xť;wÖJF4ć®»îâ©§ž˘¸¸űî»›ÍĆĉůî»ďxřᇠĂh4b±X2dożý6 u’—cŐŞUĚž=›ţýűc2™đńńaĚ1lذ;ďĽÓ±ZIulĄë1b=ţKúýxŞ‚!"""Ň !Ń]:1ŕŃŁGë”±Űí N^ŘĚfs­I«ůűűăďď_ëßzŃDZA#$ÚŢŮÜb6Ęi—¶4BBDD¤6›ÍFjjj­ľKÍDDD4ú;łˇąÉ.őüóĎ7¸oűöí¤¦¦ÖY2^}šć3(ť_FF†c©ĹęäBÍ„Bff&YYYîĽ÷ďßßâc"""€Ú ŤšořęąDDDDD¤óőiěv;‘‘‘µ–¤ŻiăĆŤ¬\ą˛ÉúĆÜ|3>ĹĹ50ÇXrrrłÎëÔ×_7¸ďřţýl?x°É:zčˇěرwwwG˙¦+ői”čŞ3€V«•ÂÂB¬V+v»ýŠ&|\]qmÁóń&¶ňr xľż>9ĄĄ”¶p€NÍŽĆŐ‰‹ĐĐPĚf3!!!Íf%,DDDDD®’””Ž=Z«OcµZë< ű‹űď'üşë(ĘÉq$ Îź§¬¨˘ěěfµeÝ·Ź¤FF|w3?]\đuuuü»¤˘‚싫ɝ޶­Á: óň0»¸`//oô\Îý5űj,+ďÓ«WUgÜdâ\a!k×®­÷¸ŕŕ`DZ!!!ÄĹĹ)!!Wö š™™éH>Říöfgî.evqÁâ ŃŃŮŮńfó©ńf3:;ă×V (./wĽůJk|d—–Rrń 
ˇĆö†őĹĎĎĎG˛"22’ł°"""""Ňz}ôÇŽk˛ÜńŤ±ą»×»Ď©´”řůa1ü§+ëa0ŕQŁŹÓ? jőőD{yíĺŐdźĆ٬ŚÜÉ‘š?ź+,l°ţ´´4Gź¦_DDŁ ‰””üýý;]źF ‰«$)) «ŐŠŐj%33łE‰‡ęŃ ®®X\\° X †—\¸n..ô¸äĂ'´‰7UÍ7|úĹěęů‹˙ŻoFVVYYYub^ť¨ŠŠ"$$„ĐĐP=ë%""""RŹšŁ¸kökŢ{ď=J ±Ą§;F5äś>MQn.¦ôô:}šę/O›Ű§ńpumqňˇ#ôiŇĂÝťű{őŞ·OSýelNi).çĎłuöl\ŚF<1Lxâ›·7łfÍę”}%$ÚAFFGŹĹjµ’””äČt5'éh29F5řŤ¸]ő őżázăgc++#»¤ÄńhÉĄÉŠú&“‰¨¨(BCC‰ŚŚÔ#""""Ň%Ą¤¤ššęřbµˇ>ͦ·ŢÂPQQďľ>Dyy©Os™}€ň’Nj̋ý–šŹÍ7Ő§7nś]%‘””Äľ}ű]ĐŐÉ WWM&,ľFc§ĺĐůąąáçćVg´Eu&2§´”ě’ÇĎŐŠŠŠŘż­ů*‚˝z9ţ«–Hhhh»ÎCˇ„D+“űöíkp„«“ÝM¦Ş7ëĹočĄs¨QŃ×Ó¨I‘^TDvI ©……ŽŚcÍ+W®$88#F(9!""""ťŞOcµZyĺ•WČHJ"çbÂVc·ëÍf"L&ői:aźćRąV+ąV+§·mĂĹhħW/"#y÷ŻŞV,ŚŤŤm—>Ť-”’’¦M›LBt3 µX”€¸Ć¸ą¸j±j±ăëëHP¤ÖA‘––ĆĘ•+ɉřřx ¤Wç»”·¸6•—”™śĚľ}űŰ’““INNfĺĘ•DDD0bÄFڎ„ÄŐ”@BBB˝«a»»âîN٬áJ]0AUáRíöZŁ'ŇŇŇXşt)&“‰ŘŘXĆŚCXX‚'""""íÎfł±˙~Ö¬YCVVV­}®NN„ÍT”–‚ľTí’B-n78QP@jaˇă ×ęäÄG}Ä#3fL›ŽšPB˘‰úŢ´Áîîô©Ń!•®­z8TŚŻ/ĄĄś((ŕ„͆˝Ľś˘˘"vîÜÉÎť;‰`âĉJLH»Ůľ};+V¬¨3»ú‹ŐęÇ”E}?77†Rő…kÍäDQQ›7o¦˛˛’É“'+!q5fúX,ôńđĐ%i‡«+1ľľÄřúbµŮ8ał‘VXTeg͚ŰaĂ7nść™‘+˙÷©››#QݧŃň›ŇšÉ «ÍFb^JJ號Oö©Sř^w]›´Ł„Ä%RRRX˛dI­•2Ě.. ôöVćPZ¬ú±Ž‚ŇRľĎÍĺ¤Íŕ1q×]w1zôhÍ1!" *ĘĎćÂń}¬É´0|řp%2ED¤ŮJ I۵‹śmŰôđŔ×hTźF.»OS\^Nĺ… řŕĽCCéőö:1ˇ„D kÖ¬aíÚµJDH›ópuex@˝˝k%&Ö®]ËŢ˝{™:uŞă‘zĺg“ş ©ű«f˝VBBDDšăěţý˙ňKĘKJęﯠH«ÔQ“kµrŕ8ľńń¸^˛ähs9+¬UĽ,^ĽŘ‘Śpurb€—? Q2BÚTubb|Ďžt3ŞÉ/çĚ™ĂŢ˝{ i•ŇÂB®\ÉŃuëÉ‘+ĺü÷ßóÍüů¤9‹)‹$ě IDAT/ľHJJJ‹ŽWB3g;wîŔÇŐ•1Äřú*0rĹx¸şňă  n¸xź±páB¶oß®ŕtpţţţ„őî‹g÷^ †t6›Ť×_ť-_~ÉţeËȬge@‘+ĄĽ¤„?˙ĺ/dee1gÎś%%ş|BbůňĺŽů"‚ÝÝź–ş‘víĺĹčîÝqur`ĹŠ-Î*ŠHűŠ‹‹ăçOüŠcU0DD¤CX±biii,˙řcŽź>­€H»‹ňňŞľhť3g¶‹Ź¨7ĄK'$ٲe P52â–îÝ5Ó¬´»îîü°[7ÇxĹŠ Š4ËŢ˝{Ł˝ÝÝéq™Ďň‹´F_OO~ŕç×â>M—NHlÚ´ ¨š3bL` î"ąjz¸»3ŕbV199™ÄÄDEDDDDšÝ§1»¸0LWĘUÔ×ÓÓѧٹs'MÓe6›Ťĺé©‘rŐEyy9Ýرc‡"""""ŤĘČČ ůâ|Ń^^ęÓH‡čÓTKHHh˛|—MHX­VÇĎÝM&Ý9rŐąą¸ŕăęęřĺ"""""Ňš3V˙)ŇQú45űÜ Ń*"""""""Ň6*+°ŰíM5(ZSZŞÉ_¤ĂÜ‹""5™<} ŽEl¨…€€DDDjţ^°••) Ň1ú4-¸•’ňňčm±č™+ąŞŽççSz1›("×öíŰŮş-,[n˙ůoĎäéKŘŕŃŚˇÉ—ED¤¶šŹlĚÍĄŻ§§‚"WŐělÇĎn˛ĽŮlĺĺ|—•Ą@ČU“U\Ěw5ŢĽ"ŇqeffrúÄ1ňĎźR0DD¤ĂčŻ6¤ôićĺh±ßä1JH\tŇngÇ… „\•7î¦óç5:BDDDD.[yj*ÎFŁ!WµOŕ\VÎ-˝®kÖqJH\ T%%ľNO§¸Ľ\A‘vaµŮj%#ŞďE‘–Č9y’ó‡))!íîxţ˙oďÎĂŁŞďýż“™Ě>Ů& Y&ŤŐ° !* ÄĄ˛h­µUŠÚRĘ˝·Z­Uéí˝ŠôÖź m5ÖJ\+ŕ "bXDHŘ$ „=!“möÉĚä÷ÇdĆ Ů·I&yżž§ŹÍ™3gÎ|rNĎ{ľç|µn=MLq1”bqĎzq–ĎQ0Ië@ËŚF|VY‰R˝ž…ˇAc¶Ůp˘®‡jk]'®úR1$wDDDDÔ7WÎśEőąsůűłäGjkq¬®Î­§ llěńóHđµYź_€ Z Ç=%ŐÖb_Utśő€X‘N‡ŹËË‘ŻŐ:Ž?« c/\„Š÷1!"""˘~şrć,~řŕCZopy¦ˇ= šŔ@€źŮŚÄÎ÷ş§á,ÎBŘí˝| (;vˇ5f3>®¨Ŕ8™ Sˇđócˇ¨ĎŠt:śih€ľÍ%Aţő ).†Đngh@XššűĆNX¦NÁ~~8ÓŘČž†”ŻH‹^Ăî=ŹĐęš>ő4 $®ŘŘĹ™3(W«Qâ¸Sí%— „‰ĹHR*ˇ–ËY(ę]s3.éő(ŇéÜ‚?łQĄe˝ÎDDŁSCEÎąGŢÖŻ_Źääd…zDsĺ éÖÓDKĄ'—ł§ˇ3Űl ® ÂW @Ţî=°´Î¨QYŐçm3č¨(­Ł%ĆTV˘*"ÂLÔͨ1›!ŻŻG´L†qr9‚{xł]'ląŃ2eFŁŰc~f3ĆTTňň """"t•UÖÔąő4eF#ĘŚFČëë§P`ś\ÎQÔiOS¤ÓAoµbĹĉ0h4(řŕĂnź[ŹÜś“8ř ظq#‰ľ[,®`âJX(ęT!° ĐŰlČ×j‘ŻŐ"P(DśB0‰„áÄ(?akL&Wqőž˛¦&¨4u "hČzšŞ4şzš3ŤŤ8ÓŘôČH†ä !ŞŤF\jťôÁéë?”ŃÝ $zxG—•#ş¬šŕ`hTÁ0´Ţą¶ÁjĹɆ€\ @´L†0±a‰kX ŤLşćf” ¨1›ŰŤ„7« ÖÔ"´ć Ä F4B¤§§cÖő7ăós , ©äädlßľŰfÎęq0a--Ec` ®„‡Á$“Anµ"88đńq Á§Ń×Ó”Ť¨1›Ű=îg6# ˇŇúC@Uçř–Ű,ˇ10uŞ`d2p9ab1Â%ÇĄRoś¬5f3Ş[OÖ¶÷„hB44  ˇ÷‡ """˘aGh·»ő46ą§Nbc5k$ţţ05Ôľ×h ŘÓŚPE:Î^D9{š M-üuúÁ=ů+č±Ĺ‚°š„ŐԸ ťB¦ @×:Î{N8 …—J$!L,ć°¨aÎ<Ô[,¨6™Ú]†áäL ZC""""ňŞž¦­ĆË—Ńxů˛ëç¨E7 @§s[ÇůĄk ź‚D"ö4^ĐÓŠDnŁ÷ýd2řŠDĐ_ąß’ 0 ­ţ B0Äpšr4B§TşFOŽË;ZGO€źŹ‚D"×˙äSÇ! 
kn†Ţju…şćf4X­ť®ďkµAˇŐ:BťŽ—cŃTxä($ccÝzš«żtőóńA´L†ą!!,Ř0ęię-×îE±cŁTŔPW‡ęÓgÜB'_D˘!ëiH ťŢ•*Y}}aIˇW*ˇU(\÷ž€ć––v'4ŕI!.‘¸B‹«S-ę˝:łz« ÍÍŽÖfC˝ĹŇéČ×Ył ­RŁ ­˛Aş~Šh8‘ŤH>ź«Ż/tJ%tJ Ri»ž¦€$0öŰ*Ł>{švQ«Ĺ%˝ľĂ{?´uú»c¨++ďôńˇü‚•Ä`Řnw­Ë R)tJ%,"żv'4ŕIÖtëja­3y‰Důú"ĐĎĎń_žÜ¨3›Ńl·»Bgč ·Z;ĽßCgáÔ`„Ôh€Ôŕ „v;d""""Ő=M`cŁŰĺÉ©ĆÖľĆçR1N~s ň÷GؤIđŹŠ‚H.Ż@C%%n_:{šp‰ÄýgŽwő4Î>¦ŁiY}E"drm6Ôt0‹_ŰžF®ŐBfľ_¨22Ł±Ý·ěf‘F©F™F© Vo» €+¤č*sžĐ"__‰DąPy›Ŕ çÉxő{·´†`±ŮşĽĽ˘«ŕÁĎl†Ěh„ČŇ ‰AďŃkĄzüµXeX,Âýý k3d–h8ô4WOkoijBŮ‘#®ź­ľľhž–ŇŁžćžŘŘN_ŻTŻGsK‹[Oă͆stCw=MD` ĄRšš`¨Ő ćÜ9×l(©˛č(ŻîiH b‹b‹ĄÝMÍ"Ě"?drŘľĐ*“L»°ă@ˇí‰]ÖĂË ś—‰\­m¨1Ú† WëÉĺ˝!1ŕkµBli†ČâH }mVDÔ/yyyČ9›‡ŇÔ)7 úë)T‘|ËX3/śĹ'""7%%%ČĘĘBEB˘ĘʆĺĄĹB»‰?ś‡UŕŰ®§iű¬¬© W !V(!’Ë wü»'‹á\ą‚jˇŰžfNH‚[ż ˝š®ąŮt%T,Ć.ÂŽ3 ¨6™şíĹ®Xś#@ÓÔÔíĄpţ뎂ˇPbá…a÷;—ŤPFD@­V3)A…ł‰Ž¸ęqTęvrpťŕvˇĐí4]q^&Ň‘˛aö‡Í×jÄ o­Ź#lŕ d#/ł ˘ASPP€Żżü<HuĆ`0 °°đWÂÚÚ GΠ¤łž¦I!‡ĐfÇ•.úmB<ĐÁň«{›Paa‚P,Ať¶ g**şÝß”°pŚ ęî7yü:ÝpuOSVYŐé6"*«:ěŰFŁ„s¤ĹŹŘÜ‘÷ö4˝ýÂŇ_§Gʉ“ýŢŹIgĎń—1@|Y"""""""ň4DDDDDDDäq $ČăH c:MÎ~ö:^|ńE”””° DDD4b0 ""Ƭf´ŐĹ(,,„ˇ›ů׉†ZYtöŕťwŢév]βADDDDDDD • ×AQZÚíş $z!11:ł jL, )µZŤőë×ăߏ<™ÁČ‚×a ADDÔ ÉÉÉËą†”\.Grr2čô,y%ŢC‚<Žy """""""ň8DDDDDDDäq $Čă8Ë ˝^ʬ¬,„„„`ůňĺ,QČÇ`ÂŤk°drÔj5 BDDnźµĘĘĘ U( 5 ´ŰYň* $hĐţÜĽy3ĘËËsçÎEHH C^ďăŹ?ĆŢ˝{ÇőýĎúëůIdŚŚCrr8‹ODDnJKK±eË )qůůđçôź4 D—•#nůtĚXµŞŰuHŃ€+))ÁćÍ›a2™\ËjkkHŤp2ŁQţţ‰‰év]D4 ňňň°mŰ6·0‚čjĽ©% ěěllٲĹF„ŽOaQ¨C!ADb÷îÝŘłg@ŕ'Fě¬[ –âĘĹ\‡Úa ADý–™™‰ŁGŹp„o΀B‰†Š"‡:Ä@‚úěę™4dAáHşáH”A,Ńi6`¨ŻF~~˘ŁŁ!—ËY"""x "ę“ÚÚÚvaÄÄ›2F 0}]~ř"/˝ôJKKY"""Ö R)*´Z”””t». "ęµ’’<÷Üs®0"t| ®I_?‰ŚĹ!""""ĹʢŁđé…BěÚµ«ŰuHQŻäää`óćÍ®™4“ŻEüü;Y""""“ÉdŹŹ‡¬© B›ť!ŻĂ{HQŹeggcçÎť®źăćÝŽđ„é, Ť*éééuýÍřü\‹ADDC*&&7nĶ™łX ňJ $¨G®žI#qáÝŚŚca¨OHQ—ôz=˛˛˛:śÖ“h¨ĺĺ塬¬ sçÎĺLDD^†uŞŁi='Ţ”Á›WѰ±eËŔţýűq˙ý÷#99™E!ňĽ©%u¨¤¤Ä-ŚP†Ĺ2Ś ""˘a«®®[¶lÁ®]» ×ëY"/ŔDÔŽ3ŚpΤ:>…3i‘Wřꫯ››‹ 6 $$„!ĆH‘›ěěldeeąÂ¨© 3m C4D$Ę DM]µś¬‰ş:>&m=´5—áă'ćßL"/Ŕ@‚\öíۇwß}×ő3§ő$j/;;ż=Ś:˝“—<0čŻ'Q!fÚ"¤Ď gń‰ş ’"~ţť¨řá0üĂÇâÓ3uźŕĄdä¶<%%%ČĘĘBEB˘ĘĘ 3y ĐSię©óću». "Đ~ZOΤAÔ1ŤFËEX"˘abýúő8VÔŁŔ9ŃŃUk›ńq®)1 LŠ™ło ţJXĽ= Şş:ĚŚÄěÔÔn×e A4ĘéőzĽńĆ8uę@$ @Ң»F‘WHNNFQł5Zk»ÇšíŔńbJ4f,š±M;ŃpÂ3’hsNëé #dAáş|-Ă"""Q޵Íxďű+Č+ŐŕwżűvďŢ͢ !A4J•””`۶m¨««E'a|ę6­gVVd2NJDDDCŁŮĽýŢżQWW‡={ö ''kÖ¬ALL ‹C4DHŤByyyضm›G§ő,//gá‰hH…ĹO¶ş†új”——cÓ¦MX¶l–/_Îâ DŁLvv6vîÜéúy0gŇŹAPt¬ ?ÔěE„ĹOc!hTS¨"qMú:”ä|…ňÓ_GK ĺgT–€hôŘ˝{7öěŮŔ1“Fě¬[uZO?‰ É‹ĘÂőCCEÎąGŢpÜI>99™E!"ę§i‹“Ś‹Ůr´ŃâM-‰F‰ĚĚL·0bâÍFyB~~>®”Á¤­ďŐóśŁ%˘¦.t-Űłg233YT˘~(HÇk9'ń /t».GHŤpz˝۶msĚQ ÇLăSďŕLDDD4"ĽôŇK€¨© 3mQŻźßv´„Y×€éóobQ‰<„ŃV[[‹­[·şn() ÇÄ›2m& ˘Ń`ŢĽyPEŤĂńK:h„pŽ–h¨(BÎ!j­őź±px(ONNĆöí۱mć,ţÉ+1 ˇJJJ°yófŹÎ¤A4„„„ ŮO‰Bs‹AD4ÂFĆJë-xďű+ź€X•„…!$ $F śśěرĂF„'_‹¸k—˛0D^.++ 2™ ŃŃŃX˝zu§ë˝řâ‹=ÚŢoűŰNËÎÎĆ‘#GşÝĆÜąs‘ššÚác%%%ČĘĘęvÝ˝źwŢyeeeÝngĺĘ•ťŢ!ż§űŇŐűńtmâý TműXéim‡Ó±Âó°÷µőä±2šíŔüF¨Śź«ŮŤFĂ™8HQW^<5­' >‰2Čő˙ť—_iMVä”t~Éóž1ÝéjçŠ*{´˙đ±©;ŢNŮ%MʶŃÝű9ˇUĄ—şÝΩKhÜŻ}éęýxş¶ń~޶}¬ô´¶ĂéXáyŘűÚö±"ÎHçh‰ŞŁď˘ŕüÎÄAÄ@‚:˛k×.|őŐW3iŚO˝ŞŘ‰, ‘—QS˘©ęÇF E†SeúNźŁ ‹íѶ»ÚF˝]ŢŁíÔŰĺťnG×Ôł}éîý´ČĂ  łw»ťËM€¦źűŇŐűńtmâý TműXéim‡Ó±Âó°÷µĚcE˘ Bh\Ę ý-Ö6ÖŁŕüŽ™8rrr°fÍŽ– ę'ź––––ŃřĆóňň°eË@\~>üuz 4ä âađ÷G||<6nÜŘ«çfffâčŃŁ®0bâÍśI†Ô‘7ž¬_żÉÉÉ#ę˝U6šńů9ŢC‚h4Ńi*p1űCę«]ˆËh ŢÔ’Ľµ§ńeąĽ›^ŻÇłĎ>ë #dAáşü— #s&ލ© ]ËöěŮgź}%%%,Qđ’Ť!¶öű〼ݻqŕŹĎv»ţÜ_˙ )÷Ţ `t$ˇÎú´ŐbłÁ¬ŐB[Y‰ŞS§Qřůç¨>s¦WۻՋN‹úâb\ţö[ś{ď}XtŢ5Ť_II věŘáş®\‹¤îć´žD¨¶¶—JŞĐX©C@Ä8„h”‰™¶Á1É®ŃĺĺĺŘ´iÓŚ–¨­­Ĺ‘#GP Ťb‹Ĺë?÷÷¤ÇéhÝ‘ü™$ă‰abüâĹJşľŹŹ@€Ä%·ŚúZů"tÂLY˝ wfľŽôW·A1&ĽwżĐ±ť”ĚyôQ¬zçřGEyU±yófW:>“oyaŃ ;|ř0ŢŘţ2~ř"“Ĺ "Ą:-‘ťť ˝Ţł—×ÖÖbĎž=¨ŠŚ€YäÇ_ĚüĚďŤ /ŕÁiÓ{t :GH ~2â-BÁ'źtşNĚÜ9…¨Fe}Ú¦ź>ÄJ%Tńń·đz$/_ލ™3qgf&>X“]uu·ŰHip0˘gÍÂě_®…bL8ć­˙5>űíĆa_Ź«§őŚšş1ÓńD""""ň ¶Ł%˘gß‚Z“r9ë2\úo˙Ě?p„Ä0`Ńé`njBҲĄ]®—´l9ĚZ-,Łüś-6L (˙ţ{|űâfĽłjjóó! 
ĹŤÚÔăíŘ,說·{7ľřĎ˙r4ö^rĚľ}ű\aD:™aŃqŽ–PډĂüFě?_łŐΠŢü™4ŕ‰aŔW(DńÁo0ţĆ4(ÂĂ;ü†_¬Tbě‚ëPüÍ7»`A§ŰŠž=Sď^Ť°É“!V*aŃépĺüyś{˙}\úú Űş~r9:ř5ꋊđÎĘUś>3zˇÉÉĨżT„S˙|…ź}ćţ">>Ľb&¤§#06VłU§Ná»W_…¦°÷}˛ň°0ü}ţ|XMć>í[o說±çń_áž÷ßĂk®AĚĽy(9|¸WŰĐ\¸ŕ;ě6Ż8fV­Z…Í›7Ăd2ˇľ4…‡>@Âuwňd""""bĄőĽ÷ýĚO@¬J‚ #Ý}ć—bÚšű›š eD|…Bj5¨8y'3w ţŇ%·őçLÁÔ{îĆ)S!SĂj6C[U…KŕĚ®,ÚĎČ>y2®ą÷^D¤¤@sSŞĎśĹ©·ßFʼnŁŻća9ô~~(>t>ľľH\Úń(‰ř›o†@$BńÁo ‰:\'ĺg?Ăň­Ż öşë ‚ŻPI` ÔsçbÉ‹/âÚużt[ßfv„±ŃłgcůÖW={6ÄţţJÄť0iĎ?‡¸Ĺîßľ/úĂÓ¸î?6"$9 B©’ŔŚ˝~îxí›< ~2Ç8µ¶aDo÷­·Ś ÎĽł ·¨÷ŁÂ'OTť>íÇLLL 6lŘ€   @mQ.ňľzÍ&O("""˘!Öl‡Űh‰]»vˇ¶¶–…b]}ćW„‡cĺŰ˙DĘ˝÷"hÜ8%ř …PŚ Gâ­·â®7ßDčĉ®őă-Â˙ř;âoĽŠ1áđőóHˇ€*>3ţs¬ü×ۇ…ą˝FҲe¸ăµ`|ÚbČBTđ ! ĆŘëŕ¶W·aňĘ• $Čó|:›Ĺ‚¤Ą·v¸Nňňe°77ŁřСW%$`Σ뀖伱˙şë.ü-u>ţyŰí8öĘV´ŘíľfŤë$»Ő ɸţÉ'đÇâÍĄËđęµs°kŐjÔś=˛r•ë9Ńłg#iŮ2´Ří8¶uŢX˛ŰçĚĹűkÖŕĘç±đÉ'ŰÝśł/űÖ—:FYD¤¤ô¸îŠđpL¸ýv¤=˙ĚMM8ú˙ţź×7111xę©§ŐzSžúŇ<śű<“ˇŃ0QZoÁËo~ŚŻľú Ď=÷öíŰǢ AŻŐ“Ďü3ţäaa¨9{d<€ż_·żn>|đ!\9B‰sÖ­s­í/×ÂG @ÎŽ7đÖňtlź3Ż-\OÖ˙ÚĘJČCCqí/׺Ö÷ŹŽÂőOüp23˙Ľýüm^*ŢJż Ç^Ů »Í†Ôßü±±Łę÷ĂK6†‰f˝ĄGŹb삟2ĹmËŔ±c6i.űm§SÔLúÉťđpţŁŹpôĺ—]Ë›ĘËq23˛LYµÉ·ĄŁúěY·çJpůŰC8ô?˙ëZVwń"ľůź˙Á]oî„*1Áµ<ńVG`r6+ '_ݵĽćě9ě~ôQüdÇř ¶o˝ŃTVŞ‚;|Ľó©€¬(řôSś|=ŤĄĄ^uÜČĺrlذ;věŔéÓ§al¨Ćé=Ż"é†ŐP¨"yb 1‹­`2™đî»ď"77kÖ¬AHH‹Ó‹Ďěý}~wźůýŁŁŃ¬×c˙3@Cńe×ňŞS§pŕŮç°ň_o#|ĘŹ_ :gę8‘™‰ćÖŮU,:+.:c}–ýőݵůO^±‘Ç^ŮŠ“™?ÎÔĄ­¨ŔÉĚLřřú`öÚµxÇ8ü—żŚšß7GH #÷íŕ ŃVrë|Ćľü˛ÓçŽiđĂGuř¸söŽ1S§vřřéwŢi·¬®¨ V(\ËÂ'Oäďý¤Ă“üDćëľo=uŚŽ‘~RiďNˇă.Ä´űďo7¬Ę[B‰uëÖaÎś9Ž?„úśű< E<©†XäÄy˛ěd€ÂÂBŽ–ŠĆ·›Ďü?˛˙¸~ˇ[qu_ä'“ą–Ő>ů„[đ8ľ¬}}Ńběyô1ײčY3[ű¨˝÷Dź:îŰ9cş××ZŚ“••ČÎÎîv]ŽFŠż9{s3Ćßx#ľ}q3l‹ăľ·Ţ›Ĺ‚âťßřŃ?"Âqb\*îđqç Ł3¦ĂÇ.—´[ćĽÇ||~l~[Oކ˗;ÜNĺÉśß·ž+ýćĆĆżz řř@ ˙Äßt#¦¬Z…±×ÍÇűk2 ­¬ôşă'##‰‰‰Řąs'ěÍfś˙râćÝŽđ„é<ą†B‰©ËעôÔTçs-±víZČ9WhçźŮ;ĐŐ(Šţ|ć—`ňŠ•ž= Šđp×˝ď|‚vŻóŐ˙ô­[ÓMź–†šsçP~ü8Ęľ;ŽŠś´ŘÜoś©ŚtŚ^ľď“˝]ľ7˙čhď$TÁ(­Ş„öđa¤¦¦vą.GH #ťĄÇŽA¬TbÜőĆ-ŕ IDAT×˘Ż˝ňĐP”=ÚĺtźÎű6XŤĆw.żúţW?Ţçó›;YßÔÔ4ŕűÖS!IImUUĎžĐŇŁFęłg‘ýŇ|»ů%HU*ĚyěQŻ=†RSSqß}÷AŇZˢá4÷O.˘¤R©3n<”a±,őźD†¸k—bÂŤk đpŚ–Řżß?˙†„`éŇĄݍ€ŘŇĚ"÷ă3ż22+˙ő/Ězř6 ĘČHĄRřúůÁÇ·}Ű\›—Ź­X‰ś7vB[Y‰đ)S0ýţę6Ü·w/&Ü~›űďż‡Ł¸EmFaŚ!1Ě\Ü·±óç#iŮ2\řňK$/s\ľqńË®‡t5M)äJĄ®kÜN™´Ë ˇ§l–f%bĹ"·Y4śÄJĹí›s6ľN—“żg®űŹŤP·^úŕ͡„Z­vM ZvęLÚzN J4€çXÜä™řü\‹ADD˝‡¸‹PxôSH$¨Őę~ééé(ö9¶źźůç>ţ8䡡ĐVVâŘ+Ż 2÷LŤŤ°77ĂnłaíńďÚm˨ŃŕčË/ăčË/#@­†zî\ŚO[ŚČéÓ±đ÷ż‡ŔO„łďľëęuD ^[xC§÷Ť8BbąôőAŘ­VDĎž±R‰ŘëćĂf±¸f茶˛×áăAă˵ýÚ?Ć1]‘TÇC‰"¦M’} ?ŢŢ8Żżę-gňŮŮ´ŞŢÄ9-¨sŽÚ˘\śýěuÎŔADDD4Ä&Ś‘â±źĄă†nŔSO=…iÓ¦±(ÔŮg~ç=ö<ö ?űşŞ*XŤFŘ­V(ÂĂ»Ýnci)Îfeáßżx7ý pÍOďq{Çr„%‰a̢ӡôč1řúů!ĺľźÁO&CÉá#h6tÝHV´Ţ»aâ·wü‡ď¶tÇz9ąýÚ?MA`üŤií&ˇ3ČđřľĆĆâ–Í›áë燋űöASXاí$,ąŔŹ÷´iˇ„¶şÓ‚ ™Č7O ĵqţ }±zőjδ1:űĚď (ô5WÚ=gÖ/~´´¸zH{ţ9Ü˙٧k˝é[…_|‡†ş–•sڰHą÷gî—zî\ÜóÁű˝v- Z[ď¸;é®»Zţ˛Űçś{ď=Ř­6$§§ăÚuż„tb1ÔjĚ^»n» v« çŢżűÖzŤŰôűďǤ»~q@|…„Mž„导Ňáe±o~r9Â'OĆĽ_˙+ŢzţŃQh,-uĄ‘=%RȡJLĵʮĂü Ŕ5¬j$Ëĺxúé§]3pŞqň-Đi*x˘ ˘¶_M#Ĺm)!ł0C 'źůë[oÚ?çŃu@ !lŇ$,yá!’+ĐT^Ŕq™¸ŻźČBBpă¦Mťź ‘BĘĚ}Ě1»†ćÂ…{˘÷߇ŐdÂř´ĹH{ţ9¨Őđ ! 
QaŇ]?ÁÍţoÄÄ@¤PŚŞß ď!1 t\¶!V*a3›qéŕ7Ý>§ţŇ%ţËĚ˙ío1=#Ó3®©ĐŇ‚Ă[^B}Q˙¦‚,üü LĽóNDNźŽ˙ő_Xđ_˙ĺzĚÔĐ€zwż÷Ţ€ď[wó—?Ž/~÷ĚZmź·gŢyy˙ţxÄSJĄ8pŕěÍfśű<ń©w@;‘'Ńk¨(ÂĹĂ!iî-řÉMsDxP_?óźzëźH{ţ9L^ą“W®t-×UUăŔśÇ…t4nÜ´ đÖňtDĎž˙¨(Üú—ż´{ ›ŮŚ#˙÷W×ĎÚĘJ|őĚ‘öüsHX˛ K–´{ΕĽ<|·m+ Zf­eÇľCLę<”>Üă0ÎĽł u.âš{ŠđÉ“!R(anjDŐéÓ8őĎ·Q™“Ó˙ťkiÁŢ_ý3|ăÓŇ AS‡Š'đÝöíĐ×T;VłŰu߬&3 šZTť:…‚O?Cé‘#}z;6łşęjTť>Ť>úUą§Fěqµzőj¨Őj×´ _żĂiA‰PłÉ€˛Ó_ŁęüQ@á·Â˙ÖYH Ąž|ć/üě3Hü1eŐ*(##a¨Ő ě»c8ľýoĐ×Ôŕřöż!hÜ8¨Ćʇ¶Ş ÚĘJdÝóSL]˝cŻ_EX"ôµµ¨8q9;ßl÷…ëĹ}űPéR~v/˘f΄LĄ‚ÝjE}q1.|ţNďÚ{óčš-ŧĄĄőbQ&//[¶lÄĺçĂż‹)5©çccq÷űďÁÜŘ×§± ˝Tż?âăă±qăĆAyŤśśěر&“ 0fÂŚ›}ë ýŁ|ńđG°ZLüĺŽBÚębŔúő둜ś<˘Ţ[eŁ™łl‘程ŢńďD"ÁňĺË‘–6řź‰·ÍśĹ_yeOĂÔ+ˇÉÉ7ŇľĄÇyJMákš6mT*•kZĐŞóGa5›eZP}]ęKóXtQ>ţřcěÝ»0÷ţgY"˘QîęQŹŚŚŚAżiĄëKÖÓů%+ *M1uŢĽn×e A˝ríş_B=w.ŞrOá»W_Ĺ•Ľó°YšˇŚŚDҲĄ¸ćŢ{÷ěa±†±<őÔSŘşu+ĘËËQ[” }]%&Ýś?‰lP^3** R©”Ĺe¦M›6âFG9 娢áJUW‡‘‘ťšÚíş $¨Wľ}q3Ň_݆1)× ýŐm®sáË/‘Ď@bŘ Á† °uëV\¸pƆjÇÍ.çß…*rŔ_oĺĘ•lL‰hÄ0iëqţË®ź=5*‚h$á´źÔ+ —/㽟ý ąo:nŇb5™a·Ú`¬ŻGéŃŁŘ÷ű§đĺďž`ˇĽ„\.ÇĆŤݦ=÷y&§%"""ęF°*‰'C"‘`ĹŠظq#â^â ę5C­GţďŻnÓŘwËČČ€JĄÂŢ˝{]Ó‚Žťu gŕ """ęŔ„1R¤Ä(`ťđŚF#˘>b AD€ôôt¨T*×´ E‡?†D](Í=P§ÜĐé:Ő…'aŃ7v»­Đń)(:|̤­Ç•‹ąÝnC$čňśuîowşz? EĐÖ\îvʰXFĆők_ş{?ž¬í@ĽźŞ­'Ž•žÖv¸+<{_[O+ž8=Ň<ů×% V%…rČĺrţHÄ@‚ú+55*• ۶mÉdBŃáŹĐTU<(3pŤe§ŤĹŚënětťüKą¨«,îv[1c㪠íđ±+ M®×ęJpÄXL™>»ÓÇŹśęY#ÔŐűŃ4”ôh_g-BXRbżöĄ»÷ăÉÚÄű¨ÚzâXéim‡Ë±Âó°÷µőô±2ŘçaťŢ «}pţÖ7T!02ŃA"\—±W˝1 ˘A‘śśěşŮe}}=j‹rak6aüĽŰm"o˘V«Ű-»uŠŞÓőĎ}&D]¶{mś?’“;ŢNžßîÁ6‚ĺÂ.÷ĺß=|Ź]mĂzIŠ‚l#!LÚď}éîýx˛¶ń~޶ž8VzZŰár¬đ<ě}m=}¬ ćyźźŹ+eM0ů*;…Ń:M.|ű!,ú<ü«˙Ä5 áüG¨ R)*´Z””” &&¦Ëu}ZZZZFc‘\söśł—†Ť‚„xüýŹŤ7éľčőzlŢĽĺĺĺi`xݧm¨(rÝ}zýúőśeÜĂ? šş1Ó Č6Ks¸ŤÚ3g222ŘÓ pOĂĂÄÚ[ÖbłÁ¬ŐB[Y‰ŞS§Qřůç¨>sfŘż‡m3gń:Čĺrlذ;věŔéÓ§al¨Ćé=Ż"é†Ő2-(ŃPsŽŠ06T»–-]şéééĂre2âăăQqň$„6űď{ę-^5Śů"tÂLY˝ wfľŽôW·A1fŕ†‹MYµ˛Ă?DÎPbÝşu®iA-úN JDDD#RiîśŮóŞ+ŚŠŠÂ“O>9lɉÁĆŤ‘Xx2Ł‘= yŽfÚ&q>ÄJ%Tńń·đz$/_ލ™3qgf&>X“]uuż_/tÂťş•‘‘µZŤwß}öf3Îěyqónç DDDäőĽmT{ö4# GH c-6L (˙ţ{|űâfĽłjjóó! ĹŤÚ4 ŻšĚ“—z&-- ÷Ýw$Ç4WE‡?BĹąĂ, yµş’<ŻÁž†=ÍHÂ^DWUŤ=Ź˙ ÷Ľ˙Ć\s bćÍCÉa÷†PikîGlj*”đ a¨Ő âä śĚÜúK—ÓÖÜŹ9Ź>ęzžsÓ—O<‰ _|Ńăít&zölLĎX¤$Ĺb4––!˙“OpúíÂnµőiźťÂ§LÁÔ{îĆ)S!SĂj6C[U…KŕĚ®,ÚíOřäɸćŢ{‘’I`ĚMM¨>s§Ţ~'NđŕęˇÔÔT¨ŐjlŢĽ&“ —ż˙ úş*N JDDDŢŮ ů÷Żľoh ‘’’ ‚=MźzšľĽ{^ɨŃŕĚ;»0㡷h‘Űɫǝ™ŻCćöĹp$Ţz+â-ĆGżř®üđC—ŻŃźíŘ­6Ś]°K^ř_ř®ĺÁńă1÷ńÇ>y>˙Ź˙ěókĹ-Z„›ţűOnŰůůAU|<&Ü~;Ţż ô55®Ç“–-Ă OýŢí9Ňŕ`Ś˝~Ć.¸‡^xgł˛xpőPLLŚëf—ĺĺĺ¨-Ę…Y߀¤…«9-(yŤč ®K€Xč‹§ź~šaOÓ§ž¦/ŻĂžćGĽdĂ ]:x‘’â¶|ćĎ‚<, 5gĎáŚđ÷ëŕď×-Ŕ‡>„+çĎC(cÎşu€śo¸]۵mć,l›9 ľř˘WŰi§ĹŽëţc#Î˙űßř×]waűś9ŘqăMČ~i Zl6Ä-Z„Řů©}Úg¸ö—ká# gÇxky:¶Ď™‹×.Ä'ëme%䡡¸ö—k]ëűGGáú'~8™™‰Ţ~ţ6/oĄß†cŻl…ÝfCęo~ŔŘXX}%˘˘˘Úębśű<Í&‹CDDDÚи!)i‚ ˛bOÓżž¦/ŻĂž†„Wk*+HUÁnËýŁŁŃ¬×c˙3@ő™3°Ť°Ť¨:u ž}>er·ŰďĎv|ýüPuú4ţéżŃP|v« Ćúzś~űmäľů aÉ-}~-˙ÖřDf&´••°[­°čô¸|čľřÝď`nj‚,$ĵţä+!‰đÝ«Űqě•­h*+Íb¶˘'33ńýß˙_ˇď¸V/9§uÎŔal¨Ćɶp"""vJs@§©@t+f†"V%aQŘÓ HOÓ—×aOó#^˛á…šŤŽoˇý¤R·ĺ?˛¶ÓçÔ9ž#ë~H}·söÝ÷:\~aß—¶ć~„MśĐçת/.†*! ź|Ů/mˇ¶ÖőXÍŮsx}Ńb·mDĎš Čß»·Ă×(řô3Ě^»‘38[D_C‰ŚŚ ŔŃŁGao6ăÜç™HZx7#ăX """tK—.EaŤ‚ŔvʵťAĂRťŹG—˙aD˝÷’’deeˇ"!QeeĂvęĎ‘ÜÓôĺuFzO#3ˇŚ€Z­f 1‰•ţscc»Ç$Ľb%˘gĎ‚"<Ň  ř …n×őD¶Ł),ěpycI©ăm“öőöµľú㑾u+âoş ăÓŇPsîĘŹGŮwÇQ‘“›ű 3•‘‘€ű>ŮŰĺ>űGGóŔę‡ŚŚ $&&bçÎť°7›qţË›w;Äň@‡Uzz:>9ŁAŤÖę¶Ľ4÷ĘNpýě'đAmm-B®ú,ęÍ  %¬ďü>’zšŢľÎHďi˘ËĘ1cÉ-˝z5‰‘($)  ­Şjw ŢńÚ?  
í×öű»ťfCÇ÷°šLŽN,îókŐćĺă_+Vâš{îÁř´Ĺź2áS¦`úŔP«ÁwŻnĂůŹţýă?ű@‚7mBěüT řPFD`îcŹ4.¸¶îý÷a5™0>m1Ňžj5|…BČBTt×Opóź˙11)<Řú\ާź~+V¬Ŕ#Ź<Â@‚͵ăü±~ÝϱtéR<ýôÓĽD=ŤÇzšľ˝{WÔŇŇŮŚ2yyyزe‹ăŕČχżN?¤űłöűă=ZŻüřq|ń»'`jhh÷XÂ’%H{ţąvËuUŐřŕ0ç±G‘xË-®ĺŰfÎÂmŰ··›fßďźęőv|…<|ô(,:ľ|âIÜňŇKđ¶ż«lŢîÝ8đÇgűĽĎo-OÇOŢŘipp‡ő±™ÍŘóŘă¨8yҵl|ZŇž®ÓôóJ^>~äX†ř€‚„xüýŹŤ7ň_/""""bO3Ś{šľěŻ2"bD÷40㡇0ű‘‡»]OđĚ3Ď<3OŢÚÚZ=z¤Ń@liŇý™ő‹_t¸Üj2CW]ŤËß~‹#}Ç_ÝîJŻVwáĚŤŤP«!’ËˇŻą‚˘ŻľÂţ§˙}M 4……HI4 M8łkŞĎžAHB¤AA°šĚĐâŰ_ěőv„)¦gdŔÔŘoţügTž< EX8ÄţJř¨żt '33ńݶW]Ă–ú˛Ď'ţń >ý -V$ŠDđńń®¦Ĺb˙3Ä•~p«K}Q.ř~R $ţţŠĹ°Y,Đ\¸€Ó˙|_oúS§5ő4Ť*Íb1‚‘ššĘe‰=Í0îiú˛żťnD÷4eŃQ¸hoAu}=&OžÜĺş!á‘&!ADDDDěihôô4Ľ‡‘R«ŐXż~=âňó!3Yň: $Ľ\.Grr2üuzív„Ľ """""""ň8DDDDDDDäq $ČăH‘Ç1 """""""ʞDDDDDDŢGŻ×٬¬ Z…R3m×á """"""/TZZŠ—^z “aIY˘Ëʱd||ŽdOCĂCQ\ŽÖ×!''§ŰuGm ‰D¨S©xÔĐ3HĄ0Éĺ€iÓ¦± DDDDÔĄäädWOc”IYr h Äąââ­?Ş/ŮXĽx1 )( “‡‡cÖĂżŔ„ôtäää 11±Ď#˝HtBŻ×c˙ţýŘżżŰIěkµ"XŁAhÍ-Š:Ą FťJ˝żŇmy||<–/_Žääd‰<ŢÓř™ÍPię¤Ń°§ˇ.5ř»¬iÓ´|& ŕMřHôđ$>|ř0ęëëÝ“čőÖÔ! ±‘'2ąNŘĆŔ@4şŤ†DŃĐö4W0öÂE66˛HÔiOscp0®ż˙~„BĂ@˘˛łłqřđa·{L89Ă …N™ŃČbŤV__蔊NC‰D‚ąsç"--Ť—fѰčiöďߏňňrE"¬‰ĹĄŻż†ľşšĹĹ=Mc` «Żąş§ąá†°zőęAym}P[[‹}űöˇ  ĺĺĺí÷µZĐĐ…V…NÇŃ#ŚVˇ€^©tü÷ŞË1ś¦NťŠ””¤¦¦˛`DDDD4,{šüü|×çŐ+yyČŰ˝'N ®ő ؆€Řö4#T“BŽŠčh:¸˙D"AJJ RRRútłJ<‘sssqřđáĂ ŔqŤ–B«…Ô`ä /ăa’É» Úž°ýą© Ń7Şĺĺ(?qŰ?ůĆÖvŃŮÓ(´:HŤFö4#€_J Ž |Űő4‰‰‰űb•Är¦ŚČÍÍmw}V[ň&-”:üĚfžĐĂ(|0ĘdĐ+•0‹ü`”J;L ť˘˘˘””Ţ‚F˝^Ź'žx˘ÓžĆ×j…Ô`„R§ĂĘJlö4©ÁŤë!Ó¦!rĆ „$%"rĆ HüýńěłĎşľXŤ‰‰ńřţ2D%%%(((p…]ÎBd1Cli†\«…ŔfcP1'©E$BłX ­B‹Řń˙»â Ôj5RRR8 ‚F}O#iiÁ”ü|4ë ť~öÚí,ä Đ*Ýö4S"#±jŐŞAą!%äňÎcIIDAT /ăAQZZŠŇŇŇoŽŮ‰^ÍĄNç.´ZpýLí™E"XD"Řľ0Éä0‹ü`‰a”IŰݤĄ#AAAP«ŐP«ŐHLLä""""˘Ö€ÂŮĎ@ĄRaÝşu055A“_€ň'P[‹V‡Ęś$ÄĂŕďďÖÓřZ­Ťü¶P?ľŰž&%%eĐnHÉ@b„śĐŤĄĄĄČĎĎGYYY·#)®&orRŁB›Ý-´i'ąstW©6 G#®µZ •JĺÁŃDDDDDý÷«Ç‡Élîr_«SOť55iŰÓ8ż@í¨§‰ËχżN?ą !‰IPFF@‰¤DÔ™ĚřűGş¶ŹŻëiH SyyyĐh4®°Â`0ôxDEO §ŽFY8ĂŚÎô4äh{˘u¦í čäÍŕÔ—ájAAAP©TP«ŐÉdHLLDHH§â$""""DűöíÁ`@~~>ŚFc§<8m:¸FW@eNŽăżcPé /¤c‡˝‰CC§}JŰޤ?_ÚjŠ—ë•î7żďęľmßOWŇ,ŔŠźţ´ËžŃŰ{^V@AA ?? ŃhP__?*k©TęJť˙cč@DDDD4Ľčőz”––Âh4˘´´žžŢéú˙řŰßpüĉn·;ËfGŹŹ[áÔ¤Ł()©ŰmČššXŘůÁÎKPşłrĚ(#~ B’!V8B‹CgNăŔ±cÝö4ŁáŇq#TII [pŔ5Ú©/—x*\prŽjŕ śËyiŃČ–——çÖÓ8żurŽ$_ż~}§ |^^¶lŮŇíkĹ„‡cÍrG8"R*ÚÝ ň…^čŃČőM›6uúĹhŰ^m´Ę@‚Ú©­­Emmm·ë9/)éHbbbŹ^‹7‹$""""˘Áć•Ń]/ŁR©ššÚévÚ† N•Ýw $Čă|Y"""""""ň4DDDDDDDäq $ČăH‘Ç1 """""""Źű˙¸*ĚľťZIEND®B`‚ceilometer-10.0.0/doc/source/contributor/2-2-collection-poll.png0000666000175100017510000010021713236733243024477 0ustar zuulzuul00000000000000‰PNG  IHDR/„ć4ĎębKGD˙˙˙ ˝§“ pHYs  šśtIMEß ! 
i¤‡C IDATxÚěÝ”Őĺ}/úţH:SęĐ$˘h ÓŢäjlŔäôfzOďQđ$zcn0±bW LWoXe%ĺGNF#žjî9§3“Ő›¨!uL´Úfʧ b´ŚEĆ 11r˙ľ›ďžŮ{Ďža~|×k-×r†ůůĚţ|źçyźďóŚ9|řđáČS4EÂK “„—@& /€L^™tš& ?;vě˝{÷jőőőQ[[«!ŁőŘŮŮ;věC‡i f555Q__ŐŐŐúBUUU1sć̨©©éóo±sçNŤ#ŕśsΉ™3gžÔm0ćđáÇ˝(ĄŁŁ#î¸ă #8Qűň—ż¬!Łőř­o}+žyćŤ#䪫®ŠK/˝4˙öŽ;âž{îŃ00BÎ9眸ůć›űĽ˙/ţâ/˘łłSÁYľ|yŮE.':ŹŤSVww·F€Tn¨aôëq˙ţýFĐöíŰ ŢîččĐ(0‚J­r\ÂČ:Ůç‚§bż1ö}ń±é5 ż{~źz„ă¬/š61jĆ˝OÁëěúE<űR˙7 ~ëCccú¤34ŚňŘô÷/¤Á`ü°}üŰŰżĐ!ĽdÎűľř÷łĎŃ0ĘDő٨ǏMźÓ'0ä^Úw°˘đrú¤3ô…±©:„áŃľď đňŹŤ™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “NÓĐ×–mąřÁóąŘ˝Ż3vż¶?""fLŻŤyłë˘aŢ, ŁŕŽuÍů˙_ľ¨q@ź{ «;îÚôDO-O«ŤĆKf ŮĎó‰ ębŢě: NX»öíŹ ßmpý<ÔňtěůigLţPM\Űp±†ÔVŚRżéuĹńč@Wwüŕů\lŮ–‹Ú;""bü¸ę¨ź^őÓk+š“ËX-™fÜy<´mË–±ł}Oź¶ť;».ć^`\Kö/!Ąů©íŃôµGb׾ýE;°µź©“&ĆĘ?]0$áÇÉf˶\ěŢ·ßšAٵo¬omÔ ´ů©íqű}=Řu+®’ź'ůzË5 /9ˇí~­3˙zŹxvĂŠ¨Ż›Üďç­om‹­ĎçbîuC~ÝOnHś(7†ł­8y%uëuĹń$ąľŻÝřDčę.:¦‹:ib,_Ô×Ěź3,cµ­Űrůđ3Káĺ뚣aîĚŠúáRź_IŰ޷⺲m¶ľµ-tý,nşúr/ZF„ÇĆáH'ąčÖâʦ»óÁĺŚéµ1÷‚şXľ¨1ć^PSÎŞ‰žĺʦ»cíĆÇ5Ü\Ůtw\ńĹ•±űµNŤÁ \3˙âÔ€éé}n˛ęrüŘ*88F‹n{`Tż˙®}űăŁźą­ PŕÄ“]ńĹ•qű}ÍůpmĘY5qÍü9EçdźżőobŃ­ âN4»ö폺?řóž¶yűĐŕúď[(hŰdľŰ0ofź¶˝â‹+㡖âăí+ľ¸2>ëßÄ®C^´Ś+/áČD,ąÓ4czm¬Z˛°čť¦‡ZžŽek6Ĺ[oŠek‰ńc«!JÚkŢěžAŐî×:ŁeËŽ övy$&€łł˝#nżŻ9nľ~tV˘ě~­łčż’ŕ2łÍ˝ .–__|ĹdóSŰăöuÍńB{G<Ôútě~m|˙ަ!ýyćή‹ĺ‘ť—ÇÚ÷=Ôňt›UOť41żçKDOxÚ´ć‘‚§ÔBµ<ľŰ™V«—.Ś;Ö5ÇúÖ¶üăĆUGăĽYńĄE Ež+ľ¸2"">óć”|T>ů‘t#ůťľŰ–?ąoĺź.(Řl:Ů ;ýsőt˛=§Űžh'ý1pI µlŮ-[vÄ®î˛uŰüÔöük©Ô†îCýşK^óI­ł3·'šľöHź:Hż˙ű÷6ĹÎÜž¸ăţ–‚›$óf×|NóSŰ{VÁĄ®5×Îż8V.YP˛mvíŰw®k‰ć-Ű úŤ—ĚŠĹ /s%Íť]‹».‹»6=1$ŹŹŻom‹ő­O<‚6uŇÄ{A]ź×pRé×lÓšM1a\u|dZmLW?x>ăÇUÇc+o,úý®lş;ŢęęŽ)gMŚu·?Ľ+éď/Ľ¬ /-U7óf×Ĺ5ó/.zŤé]Óµ<ť˙¦Nš‹^ZŃaËÖlĘźŰűç‚ÁH^ç+˙tAL™41šÖcńkćωŠ/«xÜĎđKƉ¸yă® ăŞăćëcŮšžëęúÖ§K^S‹˝†ë§×ĆňEŤEŻ©Ĺć5ĹĆ™wmz˘ ëoNŐßřłXm\ńĹ•%űľRcÍŢĆŹ­Š]ÝńÖŰ•Ľ«–,ŚÝŻuĚuËÍëz·QŇgn}>×g|}ÓŐ—¸Ďś0®:–/j(řűnŮ–‹»6=[·ĺ Ú'ů,^xiÉëS±ˇ’Ď[ądA4?µ˝`î<ůC©ßá% Z:ôkś7s@ĘwV-.y!Lď×Ňű"ľvcĎEîű÷,ës1}ˇ˝#¶>ź‹ŹL«ÍO {ţí÷5ÇżěŽkçω+nXŐg0(ôŘĘ :âÝŻuĆ–mą8|¸głćäw?¶*ŢzűPčꎇZźŽć-Ű‹ţlI§ü‰ J2“ďQěwJ˙|É>˝ŮôÎÜžXtŰEŰmg{Gělďć§¶Ç÷+XˉíÚůç÷Ľl~j{Ů}gÓŹČű¸ťą=Eë¨÷ëîŃű“Ľć.ý1Ţ>”Ż•t¤ßźŢs¨w-^qĂŞřцů‰]±kŰÖçsńŁ +úÔK©Ż›´góSŰăÚů— vŕćëŁeËöŘýZgܱ®9ç üÔÓ]ÝűM÷îÇvíŰß§?J×GşN#"îąAqǑߙŰÓçgڙۓú~ą˘óSŰóßăľ×ÔM©­b¶lËĹ–mąh޲=Ö­¸®ŕk¦ćć§¶ÔŢ®}ű+:ě ÝgŻ[qťŕ’!‘Ľ.·>ź‹;úOŽWW5ÝÝçc¶lËĹEźą-žÝ°˘O˝Ą_·ĺú›u+®ëÓ?—»>4?µ=¶nËĹňE %÷Ý۵o\Őtwɱří÷5SfLrsfüŘŞ_ăćÍŠekzŻő­mEĂË^ę»6ő=a{g{G\ŮtwŃqO±yM%ŻńôśŞÔ5»ÜřłŘX¬\ßW©ąłëb}k[ělďek6ĹŞ%ĺCĎbăćróşŢcÍR}ćÎöŽřü­k7>ާËő™şş jşÜ5&ý7xtĺŤ}n’”šŻ§?ďLĎĎť:řü­ë[źŽGWŢč3„<6ÎIí­Ô¤a ŻRĘô…pńÂË"÷·_‰wž»?^ßüőXµdAţ®×7¬*ąér\^3N<»aEĽľůë=Ç#Ë÷ďÚôD,şí8|řp¬Z˛ ˙1Éţ$‘żkŐŰÖçsńPëÓ1czm|˙ަx㮍wž»żĎĎ6TŹ]¬Z˛ ŕNÜ5óçÄ÷ďmŠďßŰ”ż“w «;®úłoäŰmů˘Ć‚vKVľílďČŻŕäŐxɬ|-$wÄKŐăúÖ¶üç›Ŕ$Żőńc«bŐ’ńúćŻÇ;ĎÝążýJÁë®Ř¤m¸5}í‘?¶*Ö­¸.rű•xvĂŠXĽđ˛Ł5Ótwܱ®9żéú뛿ĎnX GnÄěÚ·żĎ©ě[¶ĺňÁ)gŐÄş×üÎÉç>Ôúôďʼnc¸ęX—z¤o0ŹŹ7­y$L4Ě›ĎnXďÝŰńc«bńÂËbů˘Ć1˝¶ß•˝K‡2Ô’Gm—/jŚg7¬Üß~%?&Lú˘«šîŽĂ‡çűŁtq{Ż›hÍOmĎżn{×wňąéŻß۲5›Š^žÝ°"®™?'tuçĂŞb}:¸,57¦ĚŽt(U?}ŕ󱩓&ć÷ľ,X'ŻÉäužĽľźÚ3s ăžĄ«7ĺ_ăs/¨+ه-şí>űC§çZIźRj,–ÔAąľŻRéş[»ń‰řŕ'˙8Ýú@´Y=y]:ŘëÝgűý’ńuăŕ¤Ďś{A]Ütd|ű}Í×äošľN$mÝ´fSźŻÝű‘üýľoSĚ˝ .tu—<´÷ßoŐ’ůďźľ6¦ÇÜ/á˝đŇŃÓě†ÂÚŤGWK®Z˛ V/]źüLW7]}yţŔź]Ýq}™ ÚňEŤq˙-ź‹úşž}I/™Up‡lg{G<~oSÜtőĺůŹąéęËóáF©;aIhŃű`˘›®ľ<;˛št(7g®Ż›\đ}¦ś51ćÍ®‹yłëňw˘Ön|"˙ł>¶ňƸůúĆ‚v»ůúĆXwdĚÎöŽx¨ĺi/Ţ“\rpĎ–mą’ŻóôŠŤd°S8akÎÖ[µ¸`ë©“&ĆÍ×7ć…;Ű;F|ĂňÇÇă÷6ŵ ÇÔIŁľnr¬^ş0˝ÚŮŢ3¦×Ćsß’? 
˛ľnrÜËçňÓŢwę“kÎř±Uńě÷ĵ üÎßYµ8?¨LÁ‚bćÍ®Ë÷7Éăă•Ú˛-—źt,^xY|gŐ₍—ĚŠgľ%C-Y]®Üv.•ŚĹ“ńnOßîFÝhKŽsąmÍ„±ýŻnK^çÉëaŢěş‚C’súýys{ňűs6Ě›Źł©lÖ; _¶fS~|•ŚóŇc±űoů\L9«¦ –Ęő}•š:ib<¶jqľLVţ§ewÇ?ůÇqѧoŤek6EË–íš×ë3‹ý~ßYµ¸ ŕ+5ŻŰµoAź™nç¤ýç^PßYµ¸ /M®é4íˇ–Ł[Ő$×äëΛ]Źł©ěŤĹôß/™?$ßż÷üˇÜ€^rŇ؆R˛ lĘY5%÷[IřS.x)v*rzUÉŚéµE;¬tç±űµÎ˘_»Ôž›óf×ĺ/ôĺV´ µtTęQ‘k.Î.’A-'Żt}”dĄ7~ď]+éU™×ĚźSrĎ­›®ľ<˙şKľŢHiĽdVŃOě‹í 5a\u~ĺBzuyúzłjÉÂ’Ź±¤o’¬ouŁ€Ňnľľ±`‚UééăIX0~lUÉý2'Ś«ÎߤXßÚVqÝxIOVnĺeňu{.;s{ň5’„2éşąůúƲustEKń0¤aŹ× .)ĄĆ¬éľćšů Ú“Źé]ź óf•Ýă˝\HőĐ‘ľągEÓÂ~Ż=ĄĆâ3Žě;WLOdLy"™‘Ú—±XXTęő0a\u~,u «»˘kSăÁR %Ay±ů^˛˘ďšůsŠö ={;ö¬Îź|dçP™7».Ú›ż‹^–ďłň}ŕ‘őIYl[˘ţěĚíÉ·˛Ŕ¦ż.5ľž{Ańpv׾ý±xáe=×2{n§Wń¦_ÉŤËróőRמĚ’ßĎ ’ˇ#Ľ„!’^éLŚJI˙űÖl±Á^ú}ŤóЇ|ă+ŘWŁÜ$(ąďÚ·żâ‰č±Ř™Ű“ď”ű»ŰšL&‹MJ9ą$z”šxěÚ·?§µŘŞËtÝőWŻI­čꑚčýzŻt°Ţźôď\®Ö¦NšXrĺ&ôî“óřxĄ'ž¦of•z°·†Tß^}ťü˙”łjň_·÷ ÍG&”SÎŞÉOŇuÓ0oVٶHęŞw(Zl"UN:¸\µdŕ’ }J‡µúš7_ßßYµ¸ěk÷Ç%jşu뎊®Ĺę1]ÓŤófUÔ·ďlďđ”Á }Í„^ˇ\݇ö5ÉţśĄćkĹĆZI_˛%u°LąźéÚ†‹ăą‡o‰ď¬Z<ä{&NW«—.Ś7ţçÚxvĂŠü#Ůişşăöűšă˘Oß: úHĎŃú;O˘!Uýµ_ďqjrŤ)čęŽ1cŠß$<î·O/2/X%ÜĎ8=ą¶–0pěᤰ˝ĐŢ‘|üX¤W9ö7ČK˙{±Ő‘•<ú0\Ôô# éD†Kú{´lŮž?±®ż6.v'—kćω­ĎçzÖéőzXźZąQtŁôÔŕŁÔꍂÁÓş‘«‰üĎUÁA•"Ô;„ąľźiĚ‘ß[]‡ĽĐ(+y|| §Ź'ˇ=?í,»ç\z´u[®ě©ÄéšHúö-ŰrůúO‚řąGoK«Űş-—ŻŁdwzBłűµý×[ýôÉѲeGɉ^%ŹCn}>W0ůŰú|{E§‘Ă`UŞO@_Sě5ýB®#ĽÝť?¸\@”_ýÜO˝ tŇăÄô)ČýŽ)Ű;*şľ0LăťÔ8l°Aňî}©yX?O­”ťV^&Żß=ŻuVĽoj±ůŢř äR_7ą ˝¶lËĹúÖ§óăčd_ĘR§­÷í·+?O"]ĂÇ2ŻŰú|.?ĆÝş-»_ë,ůdcúýý…ÂsgוÝ+ű®MO”}R1ů›»92t„—śôťĺ Gî¸čęĐť­rß_g4š§Ž g0:/´ďé3¨¨ýߪśě/™ËÖlŠ·Ţ>µ¶ĹęÔ 'L${Aövśl*]Qyŕm.ú7ÓÇÓřr{3‹ąÔĹ íÁEň˙ÉJ‰ąłë˘eËŽŘú|.®m¸¸ PIďw™žıŢ`›{AϤ)9uÖ ăoîÚřDÜžÚ[ş`ś<¶*¦LšXQPTz,]Uö߇ëúÂĐKŹĂűšŘóÓÎükk¤ćG˝÷k­ÄÖŚ?Ń’ě_ą|Qc\ŮtwţF`Ą}Ú@‚ş‚кȼ®żŔůŽuͱvă%Ż1őÓ'÷ Ó!r×~űůöĹ;„—śÔ晼$ťÉ@&ýĚmů Ćş[®Ó4嬣–UK¬ü,g Ź0qâI˛ZßÚ­[wÄęĄ=űÓ¤÷ęď‘đd 5Ň7ҡýh´ŰŁ+o¬p0_ĺ…FEŻ©u·|.żeŃmÄłßRňc×ĚźSQŤöžäôçÚůsň+A“kA2ÉHúů¤˙OçJ+?¶Ş`‚6~“›c .×­¸./™}úÖŘýZg,şí›:ä˛néęMűŘÍ˝ .ćή‹)gŐÄ”I=|lŮ–«xĺuń/ŰňřŇ0ofţ†Ň@Çeۦ૾ÁôaéˆŇC-Odžď¶ĹřqŐńXcŔ©“&ĆÍGĚžŐŤ•ôků»ĄÄ¶Ë_\™Ž“ rî캨ź^›ßú塖§ű„—éqíc|şč±•7fbőěÉDxÉIŢQΊekzND[˙ݶŠ;Ľt8ň‘içôąčö÷x[ú.ÝHwbýí™UF"¸H_ô“ë R7-Ľ,Ö·¶Ĺ®}űó«“’Cf¦śUSňő”®»ţŽ ńŔ(<Ž=eRMÄó=BuĆP+öřxĄź7Ôęë&Ç”łjb÷kť«\Ňű“%+0“Io˛ďq@ň(x¦”›Ą1¬ąÔĺ÷ Lá]ÝzlFÓ®}ű Ě{üަ•>ľŰ_¸9~lµľî8Ň8oVţZ»văýn?’–>X¦Ô>’/´ď)űzHćG•ě›l;2ńÔ@¶`hٲ=ĆŹ«ŽÓjŹůćŐî×:óóĎJĂát[ fÜşkßţ˛O7Ąkx OAĄOđľfţś’‡P{T?Ŕöw )¶J6=?Î5f¤9°‡“ZúĐŹô…°?w¦:Éd/Şô!?č' ¬ôŕŚáRîĐ‘¤ ŇT:9+uęsŮIfŞclîçóożŻ9®lş{P§ßqbŞŻ›ś\%Żźdđ›>‘ĽO@Ş»ôˇĺjbüŘŞ!¤ EČ1P¤Ňßůʦ»cŮšMťş ‰Ţ§Ź'Źń•z-ö·‰}óSŰăŠ/®Ś;Ö5řńϤƛ·lĎOJÓ5śIźśü,˝'ľ}T?u“|Ť†~)¨ÔĽŮuqÍ‘GŘ·lË9±”ăBú˝›Ëś6^ęĆ`RsÉ ĽR’}z×LĄcʵĎŹ)íI7ú/™UĐTz@bĎŤmůąK©…(ĺńMĎ˙*Y…›>ś­Ük'Y]śţ}*íSšźÚ˙iŮÝqůVÉŁćéĐmíĆ'*úśô „JW';¤¨żž1Ŕ•ĎéżW©ŕ˛ÜX;飋]Cşş‹.řČüaŃ­Ģ[0oBÂKNz«–,Č˙˙UMw÷`.şőŁ˙_PW0PJز-Wňëčę.¸#=űíÝqKńŽ=·'üô~ ˘żÓ¶lË•$ÁnďN~¸ęüäl}k[ÉÁĘÎÜž¸c]s4?µ=väöxá’—ěOײeG4?µ=˙+÷(OúĆņﶕ F¶lËĺkb Ź"˝đRńLwíŰź˙š#=1Hę°ÜdmíĆÇŁů©ín!}=Oź>^Ş®’kţ®}űËrM_{$¶lËĹ׿ýř€Wť$!äžoĎ÷[˝o>$“;ďo‰]ÝEřJ×Í]›ž([7•žr<°1ĘÂTݶŘĂŹăŕ:pô©ťRŹTöŚ…ź,úo7-eýwŹ<9 ]®m¸¸ ľ«ź1Ţ®îhúÚ#ţćŮ"Ý·óPËÓůNď3=PĄľ~zNŰwŢpq~ R*X\Tâ`ËŢó‡RŻ©-ŰrńPëÓńPëÓúí!$Ľä¤W_79`čęŽ+ľ¸2Ýú@´l9‚ěĚí‰ő­mQ÷y$uüŘŞ>{]Ţ|}c~’qUÓÝ}îČěĚíÉ?–LJFCóSŰcŃ­\đ·lËĹ7¬Ę˙n˝W­%wÜvíŰßçs[¶жó‘}‡Z·îČź¦š|Ť›^–o·+nXŐgµ×–mą¸ęĎľqt`ëäU &*§&#ÍůÉK“‘ĺGIJÉě=ÉIŠÇŹ­Šĺ‹*{„©ń’™G~kéSgźşaU>|xT&ÉcX;Ű;âŠ/®ě3čşkăů­4fLŻÍ?ľ •Jďo•Ü[¶ć‘X¶fSź|®lş;?ŕżéęËKÖó†Ö¶Řú|®Ďk9™€čę.yzqúŃńb˙žîŰÓuÓ{"2śu3a\uÜdB~ «;®/1ˇ‚¬HŻpľłČŤ˛d,ś®Łô*ĚúşÉůkČC­OÇ_\-[¶Ç®}űcë󹸲éî˛7×–/*‹÷ivíŰ_0/÷”#߬[q]ţzwŃgnËĎÉŇZ¶lŹ+›îŽ+›îÎ˙×­¸®ß§cÝö@A_‘Ěű’ńßÍ×7Vd§ű°»6=ѧ;ĐŐ]°ŘeńÂË ľnzüŮ»’±l2‡L÷éíĽJő}ĺ¤çťK×lŠ+ľ¸2îÚřD~N–Ôز5›˘îţ<˙s•[ÝŘräé†ô*Ĺäű$µV¬ĎLÂÁ)gŐTĽohď9iĎ5¦ď‚śdN[*ŘlĽdVţ:•<Ő·őůÜ‘ĹŰă˘OßZvUeÁßď†U}>v˶ܠćôĎž— Ă’ÉGr§¤”)gŐÄc+oěłjr¸ęxüަ¸üČ čʦ»c꤉1嬚‚“L+íd‡ËŚéµůß1é’źmüŘŞ˘ű-_řáýů IDATÔÍOm?r˛óŃĎMŢŞ% ňmا͎췷kßţ¸ü =´˙ަ7»îH€Ľ0ßů/şíXtŰ1ov]ě~­ł ÓÍv#›’Ő»ë[ŰŽĘQÁʧdĽč¶ňˇDRŻé×Ýř±UńŘŞĹŻ’ľfţĹńPk[ĽĐŢ‘Ż•©“&ĽŽ[yc~ô‘ľÖílďČ·ŐEźą-˙;ďl?şRtüŘŞ¸…ČśôéăĄÜżâş¸˛éîŘýZg¬ÝřD¬ÝřDźţ¨§žćôŮűlŢěşüžcéţ¨÷!AÉI 
Ň»_ëV–şnô®›ş?řó¨?˛fşnfLŻ–şiĽdVţwIwŹ¬ŞŻ›\đzMę%™ě'ő˝|Qcţ†ă®}…׊ŐKĆ[owÇúÖ¶˘O3Í^s/¨+8(1uŇÄxlŐâ¸rŮ]q «;–®ŮK×lŠyłëŠŽĹGň€úwmĂĹ1a\u|ţÖż)¸Ć—2~lUÜËçúý;&ăÄ‹>s[Éľf ×Ődľ÷B{GA6a\uÁëőšůsňJ–¦ű”ôç6Ě›YĐ˙Ő×MîÓ÷MWŻoţzĹ}Éş×Ų5›â­·•}R0=˙+vC.©ńťíůy]îożS'MŚĆKfĺç„ĺúĚd>=Đ•Ď×6\k7=‘go}>—_홌ß{ĎKwďŰ‘ęó[ucţď×üÔö>dNŰ{7ý÷KćűĆUGýôÚ>óÖűoůܨ|KÉ×ęë&ÇłßRđX\ú‘ę†y3ăŮ +FuEÓă÷6ĺď8ílďČ˙l×ĚźŹßŰTôw›:ibÁçĄ;ýąÔĹă÷6•=ŐqŐ’…źŰÓ!ě)čż·©`ݰôÁH3¦×Ć÷ďm˛ŚŇ‹ ę´ŇÉȵ Ç÷SŻë¤^“Ď5óçÄłß2 Ŕ<ą‰qMę1ŢŻăŃś,ÝËçbÝŠëň˝äw>ú¸ýś˛×8¨¤Ę=ľ—î+§Vާű٤ż˝żÄ׹˙–Ďő9x«ĎD-F[U9uŇÄ‚˝¶ĘŐĺý·|.[ycţăw¶wäë&YYQŞ˙Şşőř8Ç‹űoů\~őä®î|H˛ł˝#ć^Pßż·)nľľ±ěŢsIÍ5Ě›™í'×…ţš7»®`,žŚ)“ëÄŚéµńŘĘŤ)3Şń’YŃŢüŐXĽđ˛’›N9«&V-YíÍ_­hLuÍü‹cÝŠëbüŘŞ>}ÍŞ% Jö5ýŤőŇ+}“~!â芻R_·÷řłŘç~gŐâ˘s¸t›čęĐž­×6\íÍ_ŤkćĎ)ٶăÇVĺ童N*6ŻKß°ĽéęËăű÷6•ě3/ĽěĆšé9c2ŽMĆďÉ8ö¦«/Ď˙ŽľŰÖçď÷ÜĂ·ÄŞ% ~ŹÓkcŐ’ńÜĂ·Ä„±ŐĎ’ë\ţIŽ#×97G†ÖĂŁńěÇŤ\.kÖ¬‰ßúĐŘř“†˙í¤ůÝwćöÄ·t’ąsŇ{čŃÚWçöűšów¸ßyîţüĹ>éhňłĄ?o0í˛e[®ěçőľ3>ض?žÜ´îŮü˙ó›ßTŹŁ ÷ën(Vř¦żć„±U™ ÓµÔňtüŕůö2©¦ě^q?~ioDDŚWu\ŹĹůkîpĽŽeŢ8ßo¨ÚäX绕Ôřpö™ąĆ,]˝)ĆŚ‰1­ôŐéÓĆçš·f†đ€Q÷ÖŰÝů=ćÎ,®¬ÝřxţńĚJö¸†ÁHďΉ%9ô«ń’YEWI¦O‘ź+Ě á%Ł®aެ¸ýľćxëíCqĹ «bŐ’…1eRMLţPMěůig¬oiˇ›3¦×Ú·’!µkßţŘóÓÎx««;żObýt[śH/™•?ě룟ą-–/jŚÓΉńăŞű\cćÍ´Ş2C„—Śş©“&ĆŞ% cŮšMq «;>ëßý¸äĐJ[·ĺbŃmĽď¦«/Ó0'ôiá»öí/yŤi7sŔ91Ľ„—p’rVMźSá K®m¸8ćή‹µź­Ďçâ…ÔÉĐ3¦×FăĽYV\2,Ţz»»`î´|QŁŁOŕkĚëšcg{Gţ3czmÔąĆř»gŹđN˘‹´Y7uŇÄX˝tˇ†`DÝtőĺqÓŐ—k“äceĺńĺMd‘đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@&ť¦ €“É »ţ-^}ó†(ŁfÜűâüɢúý¦Ś¨GőŁË+ź!ŐýλńŐ˙öbĽůö;ŁŚŞ÷ť‹.źÓ&ťˇ1PŹęőzdííěŽuŹż¤!*đ~|J\rţ5ęQ=¨ňŘ8CęŐÎnł úĹŻâ‡íű5ęQ=˘QŹŚôëěťw5B…vĽŇ©PŹęFť•—0J:»~®@=ę‘QôUťc=GÚ›‡"žŰűž†@=ŞGČ á%Ăć·ÎڏiÎé"ĺuľw=ó+ zTʍG ˇÉ€3«"~żîT Ń«źŰ«PŹę˛Ăm “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix dŇiš€JýüżŠ˙őZWŮŹyőÍn UˇC´'¨GőHv˝úfwŚ3¦âŹE=CZ/ť]żÔkE=ŞG†Ö±ĽFÔŁz¤üśŹÂKбŠżnýg 1TíŮŮ­=QŹę‘ăŘ˙űĚŤ EĎľ´?ž}iż†PŹŚ2ŻőĂÍcăRUUĄ ĂőX]]­a`ŐÖÖĽ]SSŁQ`ťyć™Ć¬Ŕ¨łň’˛ęęęⓟüdtttTôńÝÝÝńꫯj¸ 'Ĺ˝äśÜŞ««cÎś9ęQ=’ázlhhęęęŘż`«˝ÔŁzdŕ&Nś—]vYÁűfÎś÷…ęQ=2qţŮE˙íÎŤĎFDÄg>ů;1ĺg¨G†ÝĚ™3cćĚ™ÇüuÔŁz„J/9î,üňßĹî#ťĎ?Ż»¦l‡đÂ+űăß}éôű5ëĎ›76ÔÇg.ýť>˙ö©żčůü˙ďÎ˙#ć~älPŹ Ő#”´űőń…żŢśűçź]¶FvżŃwn|®ßŻ{çĆç˘ţĽ‰qď˛ĎÍ…äó?qţŮC–€zTŹÂKŽ+-?|9?1‹Ř°ů'ýŢ=;ÚQM*Ú9íyŁ+vľĽ?®˙ë'cçË˙+}BCző¨apőřŁW""bĆąă…Wödž'RqŔ˙Ą«/,úţÝŻwņÍ?‰ť/_ţ»xćż,ŇĎ Ő#d™đ’ăĘú'’ďHîÜř\lx˛ňÉŮ÷ľü‹ľ˙ŔŰďÄźÝ˙÷±aóOâî–˘ácçYAęÔŁz„Aą»ygDDüŐç/ţÝ—ţÇ‘› V´«\ÝŢŘ8#>ţ'ŹĆî7ş˘ĺG/Ç5—ţ®Ćő'§ŤsÜ8đö;ŃzäÎŮŤ ő1ůăz:‹ľ|L_wÂŘ÷Ç}riLţŔ¸ŘđäO46¨GPŹęlëŹ_ŤÝotĹ䌋ą9;ćôÜžúŮ|ěőSŢoćWJ·üđŤ ęNÂKŽI'3˙ŁçĆ„±ďʆ#ťĎú!šLÍ=˛áňî7jlPŹ Ő# ĽŹÔ]R7 ;·ŕýÇjĆą#"â­ź˝Ł±A=ÂICxÉq#Yňźt:‹ë#"˘őGŻÄî׏}BeRęÔŁz„Á*XÝ8٧.?z^Ś˙ő÷ Éjčżq_DDLů€C@@=ÂÉCxÉqaçË˙»ßčŠńżţľhřčy=ťÄĎČßíşëČÄmĐł×ĆŹ_錰ź¨GPŹę¬ĺG/Çź˝“?0.úpĎjčžÚ<ÖŐĐëźüçŘůňţřÄG&ipPŹpŇp`Ç…»›_ž»eé“Ünlś_řëÍńđćÜ NA=đö;ń÷/ľwn|.üěť˙ëď‹Ď|ňw48¨GPŹęä-=ő¬~N×ă†Í?ÉŻ†.wPČťź-úţÖ˝’Jfś;Ńá  á¤"Ľ$óŇKţ?siáÄ©áŁçĹźýúßÇź˝ëźüç˛GuăÝý~ŻżúüďUtň¨GőęHě~ý`>ĚHöťMÔź÷›1ůăbĎ]qWóβ7îÜř\Ůďó‰ó'Ĺ#ńď58¨G8©/ÉĽô’˙ŢŹ¬%K˙7lţI<Ľ97¨»^3Îť3Îť_şúB3PŹ Ő# X˛EĂ”Ś+z’ńÔž{Ţčęw5ô—®ľ°čű'`\Ě=˙lµęNJÂK2ďáÍąŘóFWŮŐ![üjŮĄ˙ÝÍ7jLPŹ Ő# [=î~Ł«ěj­ţVCéę‹4&¨G á%™¶űő±őÇŻFDϲüR^xeĽőł_Äťź‹űţäR ęÔŁz„ŃňĂ—ó{Ă–{„ôÎŤĎĆţqß WCęNVÂK2-Yň?ă܉ń˝/˙DzťĎťź‹Ö˝Ţ~§ŕĐ@=‚zTŹ0\’S‹>z^ź-ŇŢúY}üŕ÷ő»PŹ@ˇS4YVę ‚Ţ’PüěťhůŃËÔ#¨GőĂn÷ëóőŘđ±sË~lĂÇ΋ńżţľč˙ @=G /ɬ–ľ»ßč*|•2ĺgÄü#'É}ŁĺŤęÔŁz„áŻÇ#AÉ䌋†Źť×ďÇ'5›¬†Ô#Đ?á%™•,ůź˙Ńs+zĚ-ą»¶óĺý±óĺŐ€ A=ŞGVwŮ¡áŁçVôńÉji«ˇA=•łç%™tŕíw*^ňź¸ćŇߍ?»˙ďă­źý"în~ÁÁ A=ŞGÖzLÂŹţVA'ęĎűÍř«Ď˙^ĽőłŁ«Ľ¦|`\|éę ŹégI>ĘĆůĂ Ő#śp„—dŇ„±ďŹîćüyŻm\TđöÜŹś=¨Ż“v¬źęQ=‚z„łżtőEţĽĹŤőoOůŕú:iÇúů Ő#d™ÇĆ€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™tš&`¸üۡď˝ôž†HŮ˙3ízTʍGő¨ÉŠy3âOZ©!@=&ĽdŘĽy(âďrżŇ ő‡&Nś¨@=¨óŘ8CŞşşZ#ThÖ¬Yő¨QʍGFXmmmśy晢UUUńńŹ\C Ő#Śş1‡>¬J;v쎎ŽLýL/ľřb:t(ęëëăôÓOőꧦ¦&fÎśi2ËIYŹ‰ŽŽŽŘ»woLź>=jjjÔ#ęq„üň—żŚ_|1"">üág˘_TŹĐŁ»»;|đÁ¨©©‰«®şJŔ(×bUUU\uŐUú%eÂKNxßúÖ·â™gž‰sÎ9'–.]ŞóQÖÖÖ>ř`DôÜI^ştiÔÖÖj·ß~{ěÝ»7""fÎś7ÜpFڏçž{bÇŽńÉO~2,X Q@-ÂIĎcăśĐŇÁeDÄŢ˝{cőęŐŃÝÝ­q`”tttÄŁŹ>šűСC±zőęĚ®…­_L‚ËžŐ ßúÖ·4 dŔ“O>™K""6oŢ\đ602věŘѧsąś†Q$Ľä„ž 
ĄË„FOGGG¬^˝::Tđţ$ŔěěěÔH0Âýâ3Ď<Ź<ň‚QîÓ7öŇu«o„‘ÓÝÝ]ô¦Ţ>hţŁHxÉI1A»đśSâ?ןš[€ Ł31K—żvZÄ˙ýżźżvZĎż:t(ľńŤo¨KO>ůdź~ńÂsŽ7oŢmmm FAwwwÜsĎ=ů·'ť1&~ŁŞ°oFĆ>«ţFUäÇ©ťťťŃŇҢ`”śú—ů—©8‘ .?=óÔ8{ü8łjLüřőžm^</ľřb\xá…™:¬Nä‰Ů›oľ=Á›ćśż]3&~÷§DŰî÷Ô% “¶¶¶řö·żÝ§_śńˇSbď[‡ăŤźőĽçÎťQSSc˙Ya÷߼ňĘ+©ţńôńˇÂľ±»»;Î?˙|ŤĂÜ_~ď{ßËżýů OŤßýŔ©±}_O-ľňĘ+Q[[úЇ4Ś0+/9ˇ” .ŐZ #­»»;VŻ^ťßg/ .Ď>cLDDś}Ć>u™^ŰD,9+"â·ÎŚ‚~ńÓ3OŤIGj1˘gʼnýgaäôŢçňÓ3O‹3«úöŤVGĂđęěě,ŘşaîÔ1ńŰ5§ÄG>4&Î˙ŕ‚ů¦ą#Ś<á%'Śţ‚Ë„FV:¸ŚřĂźš.KŐe{{»CDŕőŢCoŇcâóžVđ1U§Ź‰›>^`:@ FF.—+¨ŃOMë JŇ}cz{‡G}TmÂ0Î%ÓŹ‹˙~]፾ô6G雂ŔČđŘ8'LgSIp™đ9Ś\mţÓ?ýSţí˙\j\T{JÉş¬:}Lüä_{ęrďŢ˝ŃŮŮ3gÎÔ0@˝÷ťtFOHYuú>{ú©câ‚IcâéÝďĹ»ďEĽűî»ńÜsĎŇ?üá?~ĽĆ„aĐÝÝ_ýęWăÝwߍ#«˘gťÖçă¦Őډţ×ĂŃőNOmľüňËĆ«0Äž|ňÉŘşukţíĎ_xj|pě)ýäÔßĎîíŁţô§?őř8Ś0+/9î 4¸LX #[›˙ńĂĄËÄĽs W™<óĚ3ńä“OjL€d«†ôáXźžY<¸LTť>&nšSx€–Gă`řÜsĎ=5Ú{Utş6Ó«ľöîÝ[ôTr`pzÄó©i§Äo×ôŻţvÍ)1wŞÇÇa´XyÉqm°Áe L---±yóć‚ÚlřÝĘjsƇN‰Îî}{ęňĹ_tT( .{ŽŐ{«†bÎxĎZĎżÚłSźĂ×G¦ÇŻ_ühá*ŻbµyĆűŹŽW÷îÝ«_„!rĎ=÷Ä믿=O)|vöi%?vęoډç÷˝?·g%ô믿^xˇF„ Ľä¸u¬ÁeB€ C«­­-{ě±cŞÍ:%^Ú˙^ü[Ϣ§ CzŽń§żWYp™č’óôSÇÄ9ă Ż®®ŽóÎ;OcÂ0óŘ8ÇĄˇ .!‡ˇŃűdăó?8fеůů Os 2 ŔŁŹ>Z\ţçúS\–ę CŁ»»;îąçžüŰżufᡠýůĂź’ďmíÇ¦ŁŁ#Z[[óojÚ)ő™˝oii‰ÎÎN ĂĚĘKŽ;C\&¬Ŕ„c®]»6˙vĎÉƧĆé§ŽÔ×KI*‡@…}cąĂ±*íŽř—7 Cĺë_˙zţńÔ_;-bÉďť6 >294$˝µĂÁŐ% ˛<ł–{\Ľ·©żQxVGG‡'` /9®'gC\¦'kL¸ädăäÔÔr'Äé§öěÁ÷٧ C)Ź<ňHüŕ?Čż=wę¸lÚ±÷ŤÓ&î?kź=ĽîsYŠý/ahęńţá""Ůúô¨ŔT/ą‘жű˝č9ôÇăă0Ľ<6Îqc¸Ë„GČa`’ŕ2}jęP—‰3«Â)ČPB[[[źĂ±ţđüÓ†ěëzć©qá9G‡‹>ř`´µµix€\.×çńÔb§d¬š®ËG}Ô¶*0€qkşżîÔ8łjŕ_çě3Ćħ¦­CŹŹĂđ^r\©ŕ2=(`B˙ş»»ă[ßúVap9ç´! .ÓÄt€©&ah÷-çÓ3Oíł˙l.—ó€ űÉcŮç˛ű_ÂŕÇ­ézśwîŕ#‘߯;µ żńŤohd&ÂK2o¤Ë„ú¦O6N‚ËÁR‰łĎřáľ5 'ŁŽŽŽ‚ŕrŇc†µoĽéă…ć=÷ÜcĄTŕž{î)¸Á÷ů ‡fetŐé=5źľ©÷裏jp(ŁĄĄĄ`ÜúéYÇľ-XşďÝ»wo´´´hhöĽ$ÓF+¸LŘJűĘWľRp˛ń˙uÁińŰ5cFĽ&"ÂÉf¸ö-'9@ëů}ďĹĎßµ˙,Tb¨öą,Ĺţ—Pą\.ßţö·óo7üî©ń»ż9fHę0}Ŕ]{{{Ěś9SßClĚáÇk˛¨wpYΤq6ďŘÂÄ˙öŹďĆÖ]••Ă9çśK—.Ťęęj(ÔgűÉƵĺ•÷âżżř«üŰ˙řÇăłźý¬? 'ĽÎÎθýöŰ VrÝréiĂ\¦˝zđp¬m{7~Ţ“›FMMM,_ľ\˝ärąXłfMţíOM;eH/ćῊçööRUUK—.`BJwwwÜqÇů=)Ď˙ŕ![ťXŰöËř—7ŹÎoľůf Cč4M@VU\FDěëŠř_ťďÓćç•—=w¶;::˘®®ÎŠ“NďŕrҸűyÄ÷^zŻĎÇž˙Á1Çôů«Ç?ľ^Ľ6í´Č(Ď<óLÔÔÔDCC?'ôäëßřF>¸Śč©˙ç{ďöůŘ3«"Ď9}P‡$µ÷_ź{7ŢtĘ1­Ś.öT™U=7#zžH>}zĚ™3Ç…“VďUĐçpL|äCĂ·ÍĘĽsO‰^űUţńń{îąÇÖ*0D„—TlćĚ™qĂ 7ô{'ąµµuHżď™gžŮזּ¶V€Â ďŹţčŹĘţ{KKË×ßüůó=účěěěS?ţéářńOŐçc˙.÷«řĘżř~íÉS •xôŃGŁ®®.jjjüq8)=řŕ[­ÔTŹéłÍѤq=7ăÇ?}/Ă ?f­“?Ľ§łł3ZZZbÁ‚ţpŚ„— ČĚ™3ű=Ý{¨ĂűYp"ůů wUČS éĐN6±cÇŽ‚÷myĄřv*+.řJčWŽ˙úżę÷ă6oŢsćĚq#á%Ŕ1Ş©©é÷鄡Ľą×ßS Ó§O·ę*đf÷{qfŐŔV_ľđÚ{Fđ`ô÷tÂP†—žJ€ŇjkkËŢLhkk‹7ß|sHľWą˝ź§OźnŐ% á%pB)w3!—Ë YxYWWçF łS4EÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Iş… IDATx d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%IÂK “„—@& /€L^™$Ľ2Ix d’đČ$á%I§i˛Ş­­-žy晲łwďŢaůŢ«WŻ.űď555ńŮĎ~Ö ` /ɬ|pÔľw{{{żs饗Fmm­?Ŕ0ńŘ8™5mÚ´Š?ö×N‹8űŚ1Çôý~ëĚĘ?¶ŞŞ*jjjü‘†‘•—dÖýŃĹęŐ«  żđśS⣵}CĘß®9öţ¦9§Ç«ǡ_.x˙«#ţű‹żĘż]UUK—.Ťęęj$€adĺ%™U]]K—.ŤsÎ9'˙ľçöľov÷„•é˙†ĘŮgŚ)řşU§Ź‰żËő .=.0ü„—dZ±óŰ;ĎvĽ7ěßűŐ‡cmŰ»ńów{Ţ\Ś,á%™7¦ŕ`ô /9.Śd€)¸Čá%ÇŤ‘0˙˙öî>*Ş;Ď÷ýĹ đ˘@´FŇŠÝJeFs[â4šŽćÄMLśŐ® dťîĚ1ęŠFWǤŐ)t:D Đ´Ą€$j¸ŔŢTQUĘĂŢĹűµÖ¬–]»j˙öo>~żżÁ%€u^ÂVz3Ŕ$¸°ÂKŘNo—ÖCx [ęÉ“ŕŔš/a[=`\Xá%lífL‚Kk#Ľ„íÝH€Ip `}„—×`\Řá%"FwL‚Kű ĽDDé*Ŕ$¸°ÂKDśp&Á%€˝^""… 0 .ě…đ+T€Ip‰`ďŢ˝*((PUU“°µh¦‘Ě0wîÜ©ŻľúJK–,!¸DDs»Ý*))‘$µ¶¶ę‰'ž`R¶Ex‰çp8”——ÇD ˘577kçÎťfpiŔÎ/Ŕćš››•źźŻęęj&Q,^ž–}Ř‘Ăň•—_}őO :1‚K˙Üůqć`%8¤Ła"„­ÖĽL?yJq>O €%Ő¤¤¨îö”^żNçŕňÖhi©+ZS’ŁT^˙1ذläČ‘#ňx<ć×·FK+ł˘5fX“8„—`×\öŐÚ›ôÂK°˝{÷jĎž=ć×·‹ŇĘ3$|Ĺeuuµňóó™<€m^€ĹąÝn•””_w\&8IşĘ¤"á%XXçŕňIQZę _q™#ÝwÇ 
ËlÜó9ó7Śđ,¨ąąYůůůŞ®®6ŹM;HK]×çO,‰Đ`°–› .$T^€…TUUÉív—‹ľ?XŮăů·&ŔŔCx QUUĄüü|µ´´Ç~ś9Xw9 .á%X@çŕňÖhéˇď\6ÂKčgGŽŃÎť;‚Ë•YŃ3,ŠÉ h„—ĐŹNž<)ŹÇc~Mp @ú ůoĚ#IwÜEp @;*/ Í™3GUUU*))‘$ť¨mŐoŽ]ŃŹ3+f!fOąxń˘ŢxăŤ.Ď1b„˛łłĺrązäš•••fUísĎ=gw»Ý:{ö¬rss•šš*I*..ÖˇC‡4nÜ8ĺĺĺńŔ á%ô3#¬ň0·6]ŃʬhĚâőzőüóĎwűyĽőÖ[=vÍĚĚĚ€đň±Ç“$ĺććšÇŠŠŠôĆoś ĽKČËËSbb˘öîÝ+Iú˛QÚz„ł§K’233µeË–€×*++Í˙óx|¸YuénOU}@¤ Ľ‹X°`ÍVă/Ą^aźŕú‡‰ťĄ¦¦ę…^ÇăąéđŇ*ýÉp!Ąq. ö€…dee´}Ą­óÜĄV&ç&”––JR—ÁĄ˙ë•••7}ÍC‡I ^^k€6„—`1YYYZż~˝bbb$u'j 0o„Ń.]»-űâĹ‹a_óx<şóÎ;Ą¨¨(Ť?^Ź=öXČ÷!eçk!Ş˙±˘˘"Íž=[«V­ řŚŮłgkáÂ…’Ú*3-Zd^{öěŮ×č|żŹ=öĆŹoŽó…^Ô¶YĐěŮłĺv»ůĆ` ´Ť€9ťN­YłFůůůjiiŃ×WdîB~—łëw:w©U˙Yg˙ ó«¦ďzäs‰ˇI’|đAóŘĹ‹ÂÂěělŤ1B^ŻWn·[ĹĹĹúôÓO5bÄ kfgg‡‹ĺeQQ‘Š‹‹Î-..6Ź˝ńĆZµj•†®ěělUVVšaf¨ëΞ=[/^Ô¸qăĚóźţyUVVęÂ… *..fc ¶Ax e;věPCC$éťŇ«’6ŔląÜŞýă&ĎO¸ ±3·Űm®7jTŚĘƢ˘"ł-<ś'ź|R[¶l1ż~ăŤ7$) ô—ššŞáÇëoű[@dW›ő\O5ć“O>˛JŇřüÔÔTósŠ‹‹5|řđ ŕŇŕrątöěYŞ.Ř á%Ř€`îÜąS%%%’ÚĚŻŻ´­3$*č=‰‰‰Z°`Á€ž7Ż×kn¨®m|Ö¬YJMM5˙×˙˝•••>|x@yg.—+ ˘Ó$‡đy]µ‡‡«Ć DŻůľĘ.\¸đšU•ětŔN/Ŕ&‡ŮÎl'j[µµéŠVfE‡ 0:˙ Ď»+\›wgť« C’/^ ąăy¨křWcú‡źťďéZUťťmćT^°“ALŘK^^žîąçóë/Ą­G®¨ĺr+“ÓIwB˝pŚŠÍ®*CUY†ZďŇ8–™™yÍÍz®š†ú|C¸÷\Ľx±Ë÷€U^€ -Y˛Dąąąć×_6J˙úÇ+:w‰ąńםb†bě0îßVn’ת°ôŻĆ t†s¨ő4;_§3cĎĚĚLľ)Ř á%ŘTVVV@€ŮĐ"íţËU&ĆŹôÝHxi„gĎž `ËăńH’ŮÎ/u§q˙jLˇÂOˇŞI;oÔů|că!ZĆŘ á%ŘXVV–žzę)ĹÄÄ0ť!źtcˇÝÂ… 5nÜ8UVVjőęŐŻkѢE’¤çž{Î ݵ{řŤVcv~Oçő0Ťđ´¸¸XŹ=ö*++UYY©^xAłgĎ6ĂWÂKvCx 67qâD­Ył†ł“pAâő(**ŇđáĂĺv»5~üxÍž=[wŢy§>÷Üszţůç®éVVVšááµv7Ţ®˝;Üž©©©zë­·$ÉëřńăőüóĎTç˛Ţ%»!Ľ€ŕt:µfÍ%$$0~˛łłÖŁĽ^.—K^ŻWO>ů¤ącykk«rssőé§ź—á®yńâEegg„ţçúŠĆą]Ť9Üëyyyúâ‹/ôÜsĎ);;[O>ů¤ţđ‡?´´Sy Ŕn˘™ N§S6lP~~ľęëë•••5 çcŐŞUZµjŐMNjjŞąá͵„ 3].WČŤtŠŠŠ‚Žĺĺ儍ݹ'˙]ŃSSSĆ`|ýŕňCŔv¨Ľ€âp8´aĂ-[¶L%%%:rä“áĽ^ŻFŽ©;ďĽ3äëƦB7S ý…ĘK0GŽ1«S§NÉĺrÉáp01ĘظÇŘUܨ°,--Ő–-[TYY©ĚĚĚ.+:ŔިĽ€â\ŞŞŞ–ššŞçž{NR[[ą±YĎÂ… U\\¬ĚĚĚmë`T^@„Řąs§<ČD @Ď?˙Ľňňňäv»UYY©ĘĘJÍš5K.—‹vq¶Fx Ŕív«¤¤„‰ŔBmÖvGx 6ÖÜÜ,ŹÇ#Ż×k›>vÎ]jŐ——Z™ €­±ć%ŘTssłňóó‚ËĄ®ÁЉ&¸Ř•—`CőőőÚ±c‡Ş««Íc÷Ý1Hó'frđl¦ŞŞJůůůjii1Źý8s°îrRL,„—`#7\ľţúëLŔ–/Ŕ&Ž9˘ť;wšÁĺ­ŃŇʬhŤĹä⸅ż`ô€ 9rDʧŰÁĺÔ”Áş•ž¬Ű‡EiJ2˙™űăW[°¸˝{÷jĎž=ć×·‹ŇO¦G+!&ü{˛ÇRöx‚ €˝^€…ąÝn•””_ß>,J+g VĚćů/Ŕ‚š››µsç΀ŕňIQZę¬!¬c/Ŕbš››•źźŻęęjóŘô±´Ô5É (,Bp @*/Ŕ"ŞŞŞTPP úúzóŘŹ3ë.'˙Î/ŔŞŞŞ”źźŻ––óÁ%` ă·b°Ż×\.ú>Á%üf ŕrąc~˝ű/WőIŐwL `@#Ľ p:ťZłfM@€ůN)&``#Ľ‹p:ťzĺ•W4věXóŘ;ĄWµű/W™Ŕ€Dx âp8´fÍš€óĐßémď&0ŕ^€ĹćŚ3ĚcG«[ő›cWÔrą•  „—`A‡Cyyyć‰ÚVm=B€ 8/ŔÂňňňtĎ=÷_Ů(LŔ€ÍŘCBzş†ÄĆvëÜşŇŇ^Çĺ¦&5”—_óřĐřxŤ0A’tńĚ}ÓŘČîӒ%Kät:ĺńx$µ/Ľ˘•YŃ3,*ä{}ńťöťĽŞŻY*`c„—61mĺ %efvűüŞĂ‡őů®wUëőöĘ8ęJKő»'W]óřČ´4Ý»eł$éŁU«{|<čyCăă5%'GǶog2,$++K’Ěóë+máĚă5—z†Ăá`ĐohŹPΙ3uď–ÍrÎśÉd Ű’].=řÎŰĘxd1“aAYYYZż~˝bbb$uźT}Çäč rą\Lú •—6ôďłf‡ţ#=]·ÄĹiŇ#‹ĺĽűnIŇŚ§¦ż––ö[»¶Ż®NĄnŹůgXŰh—KCăă™ s:ťZłfŤňóóŐŇŇ˘ŻŻHď”^•$Ýĺ ýďQO=ő”&NśČäl‡ĘËŇP^®ZŻWĹ?_ŻŞŹ?–ÔÖőęó÷ޣ¶9­YłF;věĐéÓ§%IG«[%`"áe„ęjÝ¡ńńĘ~éE%…X€?!=] +VhÂĽy*ŮôZ@Ŕu#n‰‹3ŻsK\\Čă ééš»ůő 1'¤§+kÝÓť™©’×^ 9ÖPďKrą”är©|ß~Ĺßž˘¤ĚLŐzKŻkÜ ééĘ~é%34ě<Ć5ÂŤÍ9s¦f<ýł ±9gΔsćL•Ľö óţCŤ-}ţ|ÍxúgAÇăR’•ńČbMwźţ¸acĐî펤¤¶ĎŤŠŇÔĽm ň’îtijnnČ­»Śŕň[źOe»ŢŐ_˝^Őz˝íU)š’›Ł¸ääcKHO׬—_’$}ëóéĎŰw¨ˇ˝*ÎßPÁ¤Á?¸ôŐÖęضíşPQˇË>źś3gš×ľwËfýÇO~2`1a‚’23ĺ«­Ő OˇY 9vćLe,~XRŰfNFxyćĂőWŻWćݧ´ymóöŃŞŐmchßh)}ţ|sNŤgUëő*.%EÉ.—ůĽţţ‰eŞ:|¸ß6‰¨ňňň$É 0„—(ŮĺŇ”śóëĎßíhOź?ß .KÝw»Í×ĘËŐP^®ćş:łjđ–?ˇ#›^ëŐńŤŹáÚĂÂE˙óIŇŘ™w„SssĚŕ˛ó{Ťć{·l¨öě®ôůóÍŠËăŹĘ~»Ë|­Ö땼^5ś>­˙ň?~-I‘–đ~Łţ[źO­Z4¶ęÇÍ÷†š‹żb™$éBy…>Z˝: ,ß·OU‡ëţ_ż©¸ädM[±\żk;N¨÷×z˝úÖçSf^®†ĆÇ+ŮĺR­×+_MŤ|55íW}ŮąŞÓÄ/”W|OřjjT^SŁo5ëĺ—44>^Ł33Í`}'//OßűŢ÷äńx @D Ľ´ˇÎ­Ŕ’äHJV|J˛b“’ZťËv˝°áć’ÔVŃç\ú+ß·OŁ3§*mŢ<ĄÍ›§?oßŃ«UtU˛zĐWSŁ ĺ™žBŤŹ7«Ëv˝ň˝ ĺĺ:îńhÚňĺ×=žoUWZÚVué\vţüo}>ݧ„;ŇÍăq))f»őqŹ'ěŘĘv˝kV@ú›0ホjŇmŰBÎű7ŤŤ:á)ÔŚ§¦$—K éé!ŻSęv‡|őáĂć÷Đčöđňz´*tKrŐáĂ:¶}».ś.7«5Ń÷˛˛˛$) Ŕt:ťL Ŕ–/mhj{{čµ”ízWǶm3żŽKI1«.+Ú۳é:ü±öv]ç¶vß6ů‚ŽŤÎĚ4˙\ÝŸÎě˙đ†Â˪ÇŻyżCăăĺ«­5çÓŕśywŔ†ŰţáĄ|~ëóu*úż–t§+dxů×ŇĐë|Ţč:¦ľš%ef*!=]?Ú˛YeżÝĄż?† {Ń·˛˛˛”¨={ö(++K‡IŘᥠՕ†ß|ĆWSŁ Ş:üqĐ®ĎqIIćź/\#ŔňľF¦§÷jxŮ|ťUz#ýĂ®ľžŞMĘĚT\JŠâ’“42=]#ÓŇCnä#ICbăžE8áÄŘögÔÚÚjîö}-ţ×ěŤű7÷Ę9s¦ąŮ’´Öy˝Ş:|XuŢŇ›Ţŕ 
=gâĉr:ťŞ®®f2¶ExiCż{rŐMĆeźŻË×űrł•®Bľ›őMcc—;݇—’˘)99a7ýńŐÖę–¸¸ 55ý[Čo„QÉiěhŢÉwştÜÝ7Ďé?~ú¸¦­X.çݦţAfCyąţĽműu·˘Łçťź.TT¨öSŻšëęÔpú´ĘËőŁ7¶(ÉŻ…]jk÷önÔ…ňŠ€–˙®|{Ť ş'ůjjTüóő/çĚ™ť9Ő¬Ć4ćîŢ-›UĽ~öôŁ˝{÷jĎž=ć×UUUL Ŕ–/˙käé]VÇ%űUý5[ló˙ń$w±áĚŤ—’4ő±ĽŽMs¶o»Žcě褠c—ýÖčŚKI [UÚy­LC]i©’23őm“ĎŇŐ‹ß46Ş|ßľöŕ_SBzş&-~Ř\'uÚňĺ„—ýńłŃܬ‚‚ť:uŠÉD„ALÁŔaě-éš-ÉŁý^o8}ÚR÷áęŤîâ>ś3gŢĐç•“U6¸rÝËşO;Ć–ÜĹŘ’î ýšv&efvľĆĄ¤hÚňĺĘĚË „ö¤ˇńńĘXü°¦-_ňľĘËudÓkŞúřăöń%ó×ÇŞŞŞôěłĎ\" áĺcTĂ9ďľ;lč54>^“~HRŰÚŽVŰ„ĹWSc†d“~Hq))!ďaJNÎM]çŰ.ÖýśřđĂ!Ź7”—ëBy…$iJNNȲ«±ťńŰ~JnnŘëOÍÍQĆ#‹»˝óüÍú¦±QÓV¬PĆ#‹5aŢ}74gč=ÔË/ż¬––óŘ’˘€í^0Ç=…főĺÜÍŻUŃĹĄ¤¬÷xlŰvkŢÇ[nImAŕýoţJióćih||ŰZŚwß­űß|ó†«˙|µµ’¤±aŢŚĹ+3/|°h¬U—’¬ßy[λďŰďĽ¶Ş˛Öë5w“7*;ź›áמ]WÚ{;|•«Ćő+öď—$ĄÍ›§ŚĹ‡<ß—q.z—Ń&ľsçNóŘ­ŃŇŠ5k˝ěŹ5/_MŤţĽ}‡ţaůŻ{·lVCyąąű¸;y©ŰcŮu ĘËUňÚ/4ăéźih|Ľ˛Ö=tNĹţýfv=ŽmŰ®Y/ż¤ˇńńú/˙ăת:|XĘËĺHJV˛ËĄ¸”důjkuˇ˘BλďZű˛Öë ۬&vňřIDATyąË±]č>ZżA÷nެ‘éiĘxd±2Y¬şöVůؤd3”˝P^ˇCë7ôčĽúŹeÖË/I’Ęv˝«c۶éĎŰw›óL[±B“^¬¦ş¶ wH\śôújkőçí;řaëeUUU*((P}}˝yěöaQZ9c°b†D©Ľţ;& `{„—Půľ}j8}ZÓV®PRffPuá…ň •şÝ–ßpŸŹI‹ŘńÚü7^V>¬cŰ·kjn®n‰‹“sćLł ń[źOĄnŹNľűnŰń»ďV\JrĐĆAĺűöÉWSŁIŹ,Ř}Üqcl—;íţMcŁ>Z˝Z˙°ü óśÎk”Vě߯?oߡoz¸M»ęđá Đ7áŽts\˙ńÓÇ5mĹróľ;W·Vě߯ăžÂ۱mŰ52-MqÉÉšš—§©yy!Ď+yí–ß ýçzŰÄĂiűĺ_öDx [ú¦±Q{ňe<˛Xi÷ÍÓČôŽő-żőůTuř°Îě˙°_ÖdôŐÔčź>®‰?¬ż›93äŘŽ{ iGXě&@~†­•ývWźWVvÇ7ŤŤ:îvë¸ŰÍCB·Ýl›8‘†đ, §ÚÄ$„—ĐĎh 4~3€~äőz‚KÚÄč0)`E„—ĐŹ\.—rssÍŻżľ"ýćŘíţËU&0ŕ^@?ËĘĘŇúőë•`;ôĹwúšËjha~á%X€ÓéÔ† ”™™iű˛QúšË:QŰĘ$ÂK°‡Ăˇ'žxBŹ>ú¨yŚ6rŔ@Fx 3gÎÚČÍ€őmän·[ĄĄĄ’:ÚČ—ş˘5%9Ş[źsîR«Z.3źD‚s—ŔŔCx e´‘věX­]»– Řmă`3ěF(/Ŕ†Ś6ňĚĚLóŃF~˘¶• DÂK°)ŁŤüŃG5ŹmäűO}ÇlŹđl.Tyy=Ő—ű#Ľ€ŞŤ»#Ľ€ŞŤ;#Ľ€ÓąŤü¶ŰncR¶Í@ä1ÚČëëëĺp8€-^@„ŞŻŻWAAęëë•››«¬¬,&`+„—öîÝ«={ö_9r„đ`;„—AŞŞŞäv»U]]ÍdlŹđ"DçjKěŽđl®ľľ^;v쨶Ľ5Zš’Ű!Ľđz˝r»ÝŐ–#c¤źLŹ[m™ŕ$é*“„3$ŠI,Žđ,,\µĺ?¦FiţÄÁ]ţÇVBŚô_§E«úoě8t–č [Ŕ:/Ŕ˘ÂU[.u Vzâ n}Ć”ä(MIĚdl‰đ,¦ąąY{÷îŐÁŽw§Ú€HBx rňäIy<Ő××›ÇnŤ––ş˘5%™Đ0°^€„«¶üAR”–ş¨¶ L„—ĐĎŞŞŞTPP@µ%ť^@?:yň¤^ýő€cT[ĐfSý§şş:čج .á%ô«9sćhĆŚǶ•\Őîż\UËĺV&0 ^@?ËËËÓ˛eËc;ôĹwú×?^QyýwL`Ŕ"Ľ pą\zĺ•W”™™ikhˇ 0°^€E8=ńÄ!«0·ą˘s—0 á%XŚQ…yÇwÇľl”ţőŹWôáiÚČG4SÖăp8´víZ-©Ł sţÄÁşďŽ® č_üżGĹÄÄhěرL"ĐC‡˛˛˛ —^€Ť8ťNmذA{öěŃŢ˝{%I__‘v˙ĺŞNÔ~§»˘•üľ±cÇjíÚµL ŔVhZ°`ÖŻ_PMiTa~RĹŽä€Č@x 6eTa>đŔ汯ŻHď”^ŐoŽ]QËĺ(& `k´Ť€Í-X°Ŕ\ łˇˇA’t˘¶U'jŻ09[łUxYꍦań<5–Ô×oמ8q˘ąćď˙{ "Ř*ĽĽp["O Âp8Z˛d‰\.W@&veů5/ďĽóNžŰq8ývmŁ sĆŚć±Űn»Ť‡°ť¨ÖÖÖV+°ąąY^ŻWőőő<-¶(—ËŐŻ¦áäɓޮ®–ËĺRb"Őë{±|x ``İ"ÂK–Dx Ŕ’/Xá%K"Ľ`IŃLě ąąY{÷îUUUUź^711QsćĚ‘Óéä!ô1ÂKŘ‚×ëŐÁűíúyyy<€>FŰ8lˇľľľß®ýŐW_ńú•—°ťż›ś¬ż›śŇ«×řŰWM:Q|ŠÉčG„—°ťa·ę6çH& ÂŃ6Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%K"Ľ`I„—,‰đ€%^°$ÂK–Dx Ŕ’/Xá%KŠf Đ×ęëëU__Ýďé/---:uęÔu˝'11Q‰‰‰ĺőzUPP`«1WWW+??˙şß÷ŔhÁ‚ÎáŁâ4Á5V;ßŘ­ó›/}­–ĆoĚŻcbb´dÉ8ŔMjmmmeĐ—š››•źź`ţýŹ2ôwßO±ĺýüíĽO˙{ç˙Ń•oŻHj .׬Y#§ÓÉø ´ŤŁĎ9­YłFcÇŽ5Źýźß•é˙űKŤíî…ŕ ÷^˘_DB€Ip Đ»/Ńoě`\ô>ÂKô+;—}đýÎN&Á%@ß!Ľ„%Ř!Ŕ$¸č[„—° +—}Źđ–bĹ“ŕ ^Âr¬`\ôÂKX’L‚K€ţEx ËęĎ“ŕ ˙^ÂŇú#Ŕ$¸°ÂKX^_—ÖAx [č‹“ŕŔZ/a˝`\Xá%lĄ7L‚Kk"Ľ„íôd€Ip `]„—°Ąž0 .¬Ťđ¶u3&Á%€ő^ÂÖn$Ŕ$¸°ÂKŘŢő—öAx‰Đť“ŕŔ^˘Z[[[™DŠććfĺç竺şÚ<ö÷?ĘĐđŃń—6Cx‰*ŔŚľ%šŕŔf/‘BÁ%€ť°ć%"R¨50 .ě…ĘKD4ŁłľľžŕŔf/Xmă,‰đ€%^°$ÂK–Dx Ŕ’/XRt¨/^Tii)ł OdffjÄÇ‚ÂËĘĘJŤ?žŮЧľřâ Ą¦¦š_µŤWVV2Kú\çl2ş«“căš0)•YĐ+Î|^©¦ĆćŻu^N”Ş× 72zĹÓ9/ęÄŃĎBľĆnă,‰đ€%^°$ÂK–Dx Ŕ’/XR4S¨˘¬R˙y´LŁÇŚŇčŰoSZF*“ô^ŻĽ|:çEÝźńOzlîĘkžű«W=ş?ăźôâŠ_öȵëÎť×ŰŰv_ńĐ:ÝźńO:ţÉgÇ_¶@+Z§_˝ęŃK+~©•­ëňüľňöö]ň]j <¶m—îĎř'˝ţLĺľ©Śg^äů€ź0ܰ^/ż8yVR[řŃîC]ž{¦¬íÜ©Ó'ßôuKŐʇÖÉ×úů.5éLYĄ$i¤qçhß9óµtůb=3?ěů}ˇîÜy=6wĄŠ<(nXlŔkź÷Ď®ç™Oąk2?a¸a˝Ú6^wîĽ|—šďPScłŢ˙÷}şwQvŘóOm«lśĐ­ÚE…űä»Ô¤ “‚?k“gŁ$‚ÇŰŻ=wa¶ţů™\ó¸ďRSČóűÂńO>SÝąóš"Ě]ş|±ćÜoąđŇxć’hąŔMéŐđŇhł6ÄG?ÓńO>ÓÔyíŐŤ’Bľ~˝ŚężÎá^ܰ؟oT}v® w~_8Ń>ˇ®oŐ`ĐxćS¦Su €›Ó«áĺ™öÖć¤1Ł4ĺ®É:qô3~2Ś;aťˇ+ +Ę*ő§ßSݹ󊋏UŇQšłđŞ!Oý, úŻŮ׬G?3Ă´Š˛J5űš5~â8Ĺ ‹5«=Ť˙ŤŠjűóčŰG)į óýů.5é`ŃĺklRÝąóJ3Jsf+į÷ęü)Ó'ÍGÝąóúë—çu¦=€UűŚ1Ż;â!CĚ’ÇtćóJťůü¬&LgŽ+ÜĽúßź˙<'ŤĄ˙ďů×Uqj<ó ăĚű­řĽ2ě˝óňĹÉłaď§«ű­;w^Š©éRł|ŤM×|°ŹŢ 
/Ë:ŞgĚ™¦7ăúÓÁŽ`,ŕÜĎŤsSŽű.5éĺ•ů!7Ëy{ű.­ţ—˙¦s§›ÇžÎy1ŕś§s^Ô„Iă´m÷k’¤—Wć«îÜymňlÔ„Iă‚Î76ŔٰuŤ’ĆŚ 8ß?xűh÷!ýzSaĐF:ďîÓO×ĺµÇżůjˇŠ Co`3ő®ÉZżuŤţęUŹţtđXÇ}nŰĄ·%m}o“â†ĹęŔîCz{ű.=3? Ě«(«4Çk(9pTR[ý&÷†  ňÍW uâčgÚúŢ&˝łýÝ 1ľ_¸O˙ĎG˙˝Ű¦ńĚ%iĺCëĆ"IŹŻËŃÂÜűŽŘ}Hon*Ô”é“őZáĆ Ď,*ü@ďî şßŹvŇćg7,z{Ű®×€˝ôę†=ţkXĆ ‹ŐŚ9m!c¨ŔĎ„hóö]jŇşĽ—tü“Ď4aŇ8mňlÔe˙KoŘjn¦łůç˙fdĆú”?ś3MRŰú•›<µú•ećëĆąFąÉłQćĚ7Ż˝ÉłQ›<5ĺ®É!Ď—:BłÖÖV-]ľX[ßۤ­ďmŇÜ…Ůň]jŇŻ7„vooŰĄ˘ÂďĐęW–ië{›ôÖ­Ú°uŤ&L§ăź|¦×ýB¸ĄËëńu9’¤Ń·Ź2ÇdwťŰńĄ¶ŕň™Ľ—TwîĽ~8gšŢ:°U”ý/m}o“&L§3e•Z—÷RŘg´ůç˙¦#ŽjĂÖ5ćýLÔV=ů~áľë~ćďîÓřIăĚ{]ş|±$éÍM…AA´\‡kĎďÁ;î·îÜym~¶@±ńóűbç˙űsŢB]öŇkáe¨5,ŤđOż?T±hěčíżćäŰŰwéLYĄ*ź“4f”ţů™\ýpÎ4ů.5™a¨±>eÓĄfIŇŚ9Ó4ő®Éfč׹5=řüéšzW[ksܰحěuçÎë×› %IO˝˛LKW,VZFŞŇ2RőÔ«ËĚŔĎÎęÎť×ŰŰŰĆ·ÉłQ÷.ĘVZFŞ’ĆŚŇŚąÓőxűć@˙y´ĚĽ†uᄌqć f(ě7®Í?˙7ů.5éÁśůÚ¸m­YŮš–‘ŞMžŤŠŤwčLYĄY‰))0ÜkmŐ¶Ý›4cîtó~ڰą»! ˙3ź»0[·­5ďuéŠĹfëúN»ÎźąĆîäĆýúĎń sî7ŹÇ ‹ŐÂÜűÍďł’Gů °±^ /Ci©š2}rP5ź8fwuçΛçlض6dŰňÂśűÍs®f×rŁÂŻsHv&Ěć>ˇÎ/*ü@ľKMš2}r@»şáńgrµtů‶řMžŤÚúަ.7ŮqÄ9B^;­S˝ĆĆw¬˙řŃîC:SV©Ń·Ź Ř)Ýŕ_őj|®Ô‡›c_c[ŔÜÝ ‹ŚĎŽŤwčńgr‚^źŃ^ë˙Ľ|—šĚq„ZďÔ˙~C­cYŇľ>§żą łµaëš°ë|ŔzmÍËpkXÎ]”­G?Ó˘CZş˘­•جşôۡڨüáśi×Ü|Ĺ+ĄŽ 4TŘŞÝÚ˙úťĂÎPçkQ.lŻîë¬s•dŇQć8ŚÍq¤¶ŻîÜy3äí´N?ś3Í ŤŤĘQŘ[ď…—aŞď]”­··íRÝąó*9pT3ćNď¨2:Î=ŢŢu®< Ĺ?ôě¨â ~ß!Ćd”Ćîâ]ť_QViVůMéf5˘ďR“ŢÜT¨? n•—ÚBV)°şŃ­ÍŕjĐĘ óĂťăhl űŚŚŠČÎBµ§w癇«xl q˙f…nFčkŻw®ţLÓ¶¨±áĐ݇t`÷!Í]”Tů {ęµ¶ńpŐŚRG…ŕGEmAgÇąĆ:”]rF°;¬Łĺ:Üć/uçΛáˇU^Ç8Ç]óü¦Ć¶1…«ěĚŘpčŔîCrÄ9ô`Î|­~e™6y6šęźăźՆギ㿻q CWŐ†'Ú×Ô4ÂR˙{5ÇţëWv·ŠŃřĽpç´°Xa„±Ó»^ď2T€š–‘Ş× Ű7Xj˙݇Bîrűé•ĘK˙5,C­—8wQ¶Ţ޾ˬFějÍĂ®”üľ­…ŰXĎQ żůK¨Öt)üÚ’áΗ¶ŞŻ˘¬R_ś<«ńÇ)-#U%Ź™kQnŰ˝)(4ZÇ;ßű‰0ííRđzžÝ éęÎť7ďÇX§Óżâ´«ví)Ó'_÷3Çhą÷6_U·©!?łsĄ«ďR“ľ8yVޏŽ5?§Ţ5YS 7ęř'źéĄż”ďR“~őŞG·­ĺ'ŔĆzĄňŇßÂ_ţČĽůjŰÎÝţĐ„ú¬ÎJŐ™˛JĹĆ;ڞáÇʆnO×rî|©ŁťĽł_o*ÔëĎ!ťŃâ}o5 ‹ ? ŐA÷˘ĹÝΙo¶ďŽ>eúdó}×j×ŐžŢ˙ÍBµÇ´űęÎťWlĽC÷.Ęs¨ŕúťö]ÚýççĚçgőt΋z&怒ó§Ţ5YO˝˛LRGĺ.ě«WÂK3|Ë_IilÖó§öęÉÎU†F y čPPVQV©Í?˙·¶ĎYľŘ ¶ü«˙µ[ű·'wą¶dó§Ţ5Y±ńů.5U~´űŽň™băŰÚĂ%).ľ=p v~´űą›zĐZ›aŞõ¸!±RvľŽ±ůŃSŻ. zFáÖíÜž~-Çý6MňßIŢx^żn›.óÎëĎ­ëţ÷ëß.ŞÚÓÜń|źn›ë•¶ń/Âěí/iĚ(M™>٬|ě\á7wQ¶Ň™ĎĎjĺCë´0ç~Mȧ’ÇĚ@sîÂl-Ě˝?ä睏?“#)¸ÝZęzmÉPçKŇÂśűőöö]zyeľ–._¬ŃcF©äŕQŘ}H±ńmňl4?ë‡s¦éíí»Trŕ¨^¶@3ćLWSc“JSÉŁš0iśÎ|~6(h3Ő÷˙}źNýL?ĽgšŇ2Rö¸?ţLŽÖ徨’GőŇĘ|Í3M±ń±zżđ3P}ü™Ü€ÔxFáÖ w˙×zć«_Y¦ÍĎčĚÉłš0iśš.5PtHżzŐcľ6úöQZý/˙- 3>ĂD‹ ?Ř%\jkM÷đ­-î|©­bÔ×ؤ÷ ÷ŚiĘôÉZşbq@wZFŞVż˛Loľę1wĂ6ƹɳQuçÎkółA-ÎK—/6CĎ’GőĂ{ÚB˝pëy¦e¤j“gŁ6?[`ľÇ\Ź?“0.˙gŞ-Ľ«Ř»zć&ŤÓ˝‹˛ućóJ˝_¸ĎGlĽCŹŻË 4oňlÔşÜu¦¬Ň¬,ť»0[Ź?“Ł×ź-yż˙üL®ââcőŃîC**üŔlżwż°§¨ÖÖÖV˙ĹĹĹš={¶¤¶ čµÂŤý>H˙Ťm­ŤéŻ˘¬RMŤÍJÓ˝đíFř.5u´(_cL×sn¨{îîş“ţ÷.©Wďż'Çď??×;f«Ü/nĚÓ9/šŽřĂ4kÖ,óµh;ÜŔő†R}Qu7,¶Űˇâőś{Ł÷Ü—÷ŢÓăż‘ů±Úý ç b Xá%K"Ľ`I„—,‰đ€%uąŰx“ŻŮܦzZ“Ż9ěk]†—gĘ*őtÎ‹Ě €>GŰ8KŠjmmmí|pŐŞUňz˝Ě€>árą´eË–€c!ĂKčo´Ť°$ÂK–Dx Ŕ’ţč QZIEND®B`‚ceilometer-10.0.0/doc/source/contributor/architecture.rst0000666000175100017510000001617213236733243023616 0ustar zuulzuul00000000000000.. _architecture: ===================== System Architecture ===================== .. index:: single: agent; architecture double: compute agent; architecture double: data store; architecture double: database; architecture High-Level Architecture ======================= .. The source for the following diagram can be found at: https://docs.google.com/presentation/d/1XiOiaq9zI_DIpxY1tlkysg9VAEw2r8aYob0bjG71pNg/edit?usp=sharing .. figure:: ./ceilo-arch.png :width: 100% :align: center :alt: Architecture summary An overall summary of Ceilometer's logical architecture. Each of Ceilometer's services are designed to scale horizontally. Additional workers and nodes can be added depending on the expected load. Ceilometer offers two core services: 1. polling agent - daemon designed to poll OpenStack services and build Meters. 2. 
notification agent - daemon designed to listen to notifications on message queue, convert them to Events and Samples, and apply pipeline actions. Data normalised and collected by Ceilometer can be sent to various targets. Gnocchi_ was developed to capture measurement data in a time series format to optimise storage and querying. Gnocchi is intended to replace the existing metering database interface. Additionally, Aodh_ is the alarming service which can send alerts when user defined rules are broken. Lastly, Panko_ is the event storage project designed to capture document-oriented data such as logs and system event actions. .. _Gnocchi: http://gnocchi.xyz/ .. _Aodh: https://docs.openstack.org/aodh/latest/ .. _Panko: https://docs.openstack.org/panko/latest/ Gathering the data ================== How is data collected? ---------------------- .. figure:: ./1-agents.png :width: 100% :align: center :alt: agents This is a representation of how the agents gather data from multiple sources. The Ceilometer project created 2 methods to collect data: 1. :term:`Notification agent` which takes messages generated on the notification bus and transforms them into Ceilometer samples or events. 2. :term:`Polling agent`, will poll some API or other tool to collect information at a regular interval. The polling approach may impose significant on the API services so should only be used on optimised endpoints. The first method is supported by the ceilometer-notification agent, which monitors the message queues for notifications. Polling agents can be configured either to poll the local hypervisor or remote APIs (public REST APIs exposed by services and host-level SNMP/IPMI daemons). Notification Agent: Listening for data --------------------------------------- .. index:: double: notifications; architecture .. figure:: ./2-1-collection-notification.png :width: 100% :align: center :alt: Notification agent Notification agent consuming messages from services. The heart of the system is the notification daemon (agent-notification) which monitors the message queue for data sent by other OpenStack components such as Nova, Glance, Cinder, Neutron, Swift, Keystone, and Heat, as well as Ceilometer internal communication. The notification daemon loads one or more *listener* plugins, using the namespace ``ceilometer.notification``. Each plugin can listen to any topic, but by default, will listen to ``notifications.info``, ``notifications.sample``, and ``notifications.error``. The listeners grab messages off the configured topics and redistributes them to the appropriate plugins(endpoints) to be processed into Events and Samples. Sample-oriented plugins provide a method to list the event types they're interested in and a callback for processing messages accordingly. The registered name of the callback is used to enable or disable it using the pipeline of the notification daemon. The incoming messages are filtered based on their event type value before being passed to the callback so the plugin only receives events it has expressed an interest in seeing. .. _polling: Polling Agent: Asking for data ------------------------------- .. index:: double: polling; architecture .. figure:: ./2-2-collection-poll.png :width: 100% :align: center :alt: Polling agent Polling agent querying services for data. Polling for compute resources is handled by a polling agent running on the compute node (where communication with the hypervisor is more efficient), often referred to as the compute-agent. 
Polling via service APIs for non-compute resources is handled by an agent running on a cloud controller node, often referred to the central-agent. A single agent can fulfill both roles in an all-in-one deployment. Conversely, multiple instances of an agent may be deployed, in which case the workload is shared. The polling agent daemon is configured to run one or more *pollster* plugins using any combination of ``ceilometer.poll.compute``, ``ceilometer.poll.central``, and ``ceilometer.poll.ipmi`` namespaces The frequency of polling is controlled via the polling configuration. See :ref:`Polling-Configuration` for details. The agent framework then passes the generated samples to the notification agent for processing. Processing the data =================== .. _multi-publisher: Pipeline Manager ---------------- .. figure:: ./3-Pipeline.png :width: 100% :align: center :alt: Ceilometer pipeline The assembly of components making the Ceilometer pipeline. Ceilometer offers the ability to take data gathered by the agents, manipulate it, and publish it in various combinations via multiple pipelines. This functionality is handled by the notification agents. Transforming the data --------------------- .. figure:: ./4-Transformer.png :width: 100% :align: center :alt: Transformer example Example of aggregation of multiple cpu time usage samples in a single cpu percentage sample. The data gathered from the polling and notifications agents contains a wealth of data and if combined with historical or temporal context, can be used to derive even more data. Ceilometer offers various transformers which can be used to manipulate data in the pipeline. .. note:: The equivalent functionality can be handled more stably by storage drivers such as Gnocchi. Publishing the data ------------------- .. figure:: ./5-multi-publish.png :width: 100% :align: center :alt: Multi-publish This figure shows how a sample can be published to multiple destinations. Currently, processed data can be published using 7 different transports: 1. gnocchi, which publishes samples/events to Gnocchi API; 2. notifier, a notification based publisher which pushes samples to a message queue which can be consumed by an external system; 3. udp, which publishes samples using UDP packets; 4. http, which targets a REST interface; 5. file, which publishes samples to a file with specified name and location; Storing/Accessing the data ========================== Ceilometer is designed solely to generate and normalise cloud data. The data created by Ceilometer can be pushed to any number of target using publishers mentioned in :ref:`pipeline-publishers` section. The recommended workflow is to push data to Gnocchi_ for efficient time-series storage and resource lifecycle tracking. ceilometer-10.0.0/doc/source/contributor/index.rst0000666000175100017510000000230113236733243022230 0ustar zuulzuul00000000000000================= Contributor Guide ================= In the Contributor Guide, you will find documented policies for developing with Ceilometer. This includes the processes we use for bugs, contributor onboarding, core reviewer memberships, and other procedural items. Ceilometer follows the same workflow as other OpenStack projects. To start contributing to Ceilometer, please follow the workflow found here_. .. 
_here: https://wiki.openstack.org/wiki/Gerrit_Workflow :Bug tracker: https://bugs.launchpad.net/ceilometer :Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Ceilometer]`` for faster responses) :Wiki: https://wiki.openstack.org/wiki/Ceilometer :Code Hosting: https://git.openstack.org/cgit/openstack/ceilometer/ :Code Review: https://review.openstack.org/#/q/status:open+project:openstack/ceilometer,n,z Overview ======== .. toctree:: :maxdepth: 2 overview architecture Data Types ========== .. toctree:: :maxdepth: 2 measurements events Getting Started =============== .. toctree:: :maxdepth: 2 devstack testing gmr Development =========== .. toctree:: :maxdepth: 2 plugins new_resource_types ceilometer-10.0.0/doc/source/contributor/5-multi-publish.png0000666000175100017510000012337713236733243024056 0ustar zuulzuul00000000000000‰PNG  IHDRH – \ۉzTXtRaw profile type exifxÚUŽë Ă0„˙3EFŔć1N%R7čřÁq*·ź,8ťĚź÷IŰ 1H»‡Ąššx•žs·Ń«Nž.­–M‚),ĂY×G}ü/],ětu·n»í¨t‘Ş5C#•Çůwd­ý÷qż]B,!őaję iTXtXML:com.adobe.xmp :Čy˙sBIT|d IDATxÚěÝX”÷ť˙ű8÷ `Ô­@M˘Žf7¶F”´ŮŻ~µ'öČw›ž5=iµéµMÓä=Íi“~żi´ŮĆ˝šöĽökÎĆk·5—¸î¶!!1«1»1m6r”Ćřc€(̨ ś?Ćą™`aîç㺸2?îűžűţ çĹűóţ¤ôôôôŔÂR`u$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –gcâĂëőĘëőö{ĽˇˇˇßcEEEýs:ť˛Űí $7É477Ëď÷«ˇˇA>źOÍÍÍQ‘á %ĹĹĹĘËËS^^á –ŇÓÓÓĂ0ôçóůÔĐĐ ††Ő×׫ĄĄĺ†žŹa*..VQQ‘ŠŠŠärąx“’ëF8®v*ďjGżÇ]WŰeďąöX˝­ ßvŤ§éőLK$>źO|đ<Ź<Ď ŰC`čQÔuVöž.ą®¶Ĺ휼2u>%S-ią:ź’©ć 9jIsČź’6ŕ~†a¨´´T%%%„% ‘%’` rčС·sv·©¨ë¬Š»ĎިëŚě=]7ôĽëmjIËU˝­@ iSن&yyyfX’——Çw9°L@âőzµwď^y<ůýţŰ8»ŰäľŇb†"c] ()g˘S-¶ÜČ×ätę®»îҢE‹hô @I477«¶¶6jµł»MĄWNČ}ĄEyW;ÇíuúRŇÔ6Už‰NJźÓďyĂ0t×]wiĺĘ•%ô‘´I}}˝jjjÔĐĐĐď9ÇŐNąŻ´¨ôň‰¸ö+|)i:”1WŻĄ«uBfŘs†aČív«˘˘‚é7\—tÉ@ÁHá•3Şđ˙a\Lź‰ŰxŘ t(cnÄŞ’’’‚”D‰×ëUuuuÄ`¤äň UřŹŤë)4#ĺKISmF±jŤ/ôkîJP°şqř|>˝ţúëÚ»wożçF"ŚW” $ŘŁdőęŐ ŔrĆu@R__Żť;wĘëő†=N02¸hAI^^ž*++U\\Ě ,c\$^ŻW»wď–Çă {ĽđĘ­÷˝ź”ŤWĹ—’¦˝Ć˝n„"n·[ëÖ­cÚ ŔĆ]@rđŕAíŢ˝[~żß|ĚčéR…˙ĘýőĽŁĂTo+ĐîĚ%j±ĺöŽ«a¨˘˘Bĺĺĺ  ©Ť«€d÷îÝŞ­­ {¬äň ­ë|Oöž.ŢÍ8ŘkÜÚoÚŤŰíVeeĄěv;HJă" ńz˝zá…ÔŇŇb>ćěnÓşÎ#–Z˛wÔĆ{B¦^˛/UăÄ©ćcyyyÚ¸qŁ\.H:c> ńx<Ş®®›RłčJ‹Ş:ޡj$ÁjŤbí¶/ {lÝşuLą$ť1DšRłÎw„^#ٍyB®^Č^¦Ö ™ćcLą$›1ř|>íرC ćcŽ«ťzčâV¨ąďGJš^ČZĆ”@Ňs‰ĎçÓłĎ>Öo„)5cĂ^ăVŐŘ÷ ĂĐ#ŹÖڵ˛ă•^>Á»4Ć4OČŐł“ËĂ–®¬¬Tii)—ĆD@B82ţ4OČUuÖRµŘr{ß3BŔ8uĂ’ľáŃÓĄG>ŻĄë8ŕKIÓł“ĘĂB’ŠŠ ­^˝šÁH2Ź'l*<Śu%%%ĘËË‹yűŽŚ‘B*I’‹×ëŐcŹ=Ć@WśN§žx≷O˝Q'J8’ě=]zäB­]éýkÂÎť;uđŕA Iś?žA0î µęÍv#NŇçóiÇŽ„#IÂŢÓĄŞŽwÂ*IvîÜ©ĽĽßÉ@ł†Śę›`8Zć˛ńÂ[„#I"’8®~aúý~=űěłjnnfpcÚ¨$»wď G*;ŢQq÷YŢ…$bďéŇCČčé’IŞ««ĺóůŔ5jÉŢ˝{učĐ!ó~eÇ;*˝|‚w ą®¶é‘ĎkͤĄĄE;vě``cÖ¨$őőőŞ©©1ďŻô׎$9×Ő6mĽđ–yżˇˇA{÷îe`cRÂ’ŕŠ5A…WÎh˝ď#oĹÝgµ.ä˝®©©ˇ `LJx@Ňw9߇:0ęRîŻ×˘+-aßô#Ś5 HöîÝ«††óţĆ oÉ~˝/¬ŁŞăseŻ×«ť;w2(€1Ĺ–¨÷í;˛ÚwŚk,*¸˛ÍS9wK’<ŹjkkU^^Îŕ )477›•râĎétĘn·3 ˇDę;Rá˙Łma®«mZç;˘Ýö%’K>»Ýnĺĺĺ18×jkkµ{÷nH°çž{Ž$TB¦ŘěŢ˝›ľ#č§o?’ęęjăŢűďżĎ Ł€&ß Ńâ^AR__ŻC‡™÷é;‚PUďč±Ü{ĺOISCC<ŹÜn7¤’“/ŮŇ Nz>?'u]f Ŕ¨{@ZjľčJ }GĆŢÓĄ ˙±°©6EEE”M#9ţ‡ę^©Ô‚™ 'W^˙Ősľ…Ł"®SljkkŐŇř‡ŚŃÓĄőľ#Ś0ú)÷׫đĘIUmjkkŔ ·€ÄçóiďŢ˝!‚˙¨ĽëK»}­÷őöm¨©©‘×ëeP7LÜ’Đìޫť¬Zą®¶iĄż>ěű€%.IßƬUď0˛T…˙Śë |=ŹęëëŔ —€$´‡ŤY+{O—ÖuľgŢŻ©©aP7į׫>řŔĽOcV Eéĺr\ďUÓĐĐ@/Ŕ 1â€$´1ë˘+-4fĹUřŹEü~`´Ś( ńz˝a˝GĘýôŔĐ…V‘:t*Ŕ¨Q@ú×ţÂ+gč=‚a+˝|<â÷Ła؉Ďç «aY_ŚDůĄzsE›C‡Éçó1(€Q3ě€$tĺŞG0Röž.•ű˙ńű €Dv@Z=Rzĺ#‰+żTńű €DV@ŇÜÜl6Ň4zşTz™€#gďéҢ+-’ €ëëiú Ă HB§?¸Ż â!4lŁŠ0Z†x<óvÉĄăŚ"âĆ}ĄĹlÖú}@" 9 ńx<ňűý’$ÇŐNšł"î‚UI~żź0*†ôý ÄSč÷ `4Ś( ˇ9+Á}ĄEŽ«ť’}H|>H¨!$őőőaÓk\WŰA$U$€Ń4¤€¤ˇˇ!âX ŢB{Ű„~ßC® ‰ô·˘®3ćm@˘ ) illŚř7{O—śÝ)\^ŻW^Ż—A$LĚIhőăj§ě=]ŚިëlÄď?$ż˝{÷ę±ÇSMM °Ż×«ýčGzě±ÇÔÜÜĚ€Ŕ(Š9 ťćŔôŚúXWmm­Ľ^ŻöîÝ«ęęj`ŹG---ňz˝zöŮg I` «‚„é5 ô!±®ŕjYR`©gB€Uř|ľ°ß‡„$0zbHBűŹPA‚Ń@’¬ŠFOLIčS٧KyW;9Ś ×Ővóöůóç #$X! ŚŽ’ЦήVF Ł&ďj‡y›i6Ö´ä¶ŰĚŰ„$+™óÍĘČČDHŁ!¦€¤ĄĄĹĽ=ĄÇǨaÔ„V„ÎÉ…u|uý×I–4mú =ř× I`”Ä„~0 ý‹>hƵ+ćmţ1`]„$«š6}:! 
Ś’’ĐlB˙˘$ZhCŕÖV¦wY!ÉŘfŘRµ ?S«ć:´j®Cn.ĐŞą•ąrä0Ň ÎětÍË5äĚNÖ8ĎË54/×aKe0‹ $€Ńaň?ÎBţ˘Ś&V±ÁW×M’tä˝÷$BIŞŞŞbpnŕ‡ýĺ®ÉZźőűÚ˘)jlóë_N´Ş±Íoů1[[8Eór }Üć×ĎŹśňx?Ľd†$éů#§OŔB‚!É‹·C—.]2C’GyD.—‹€8éĎO,ń‹©đĘó6)•$cÇŇé“ôđ’Z:m’Žśę¸¬ŹŰüć—ůsśk¶ť>‰€a˘’kĐ ’qÝó–/J·,ë˝ßŮ.ýű>éěÉä{'g/”2'IMǤÎĎ“ęŇěę2o{˝^ţJ*IĆ€˛™9Z[8%đôîkŞ;Ů®şćvů»Ż…mgŘRUćĘQŮ̶Tm_ V•Ăä˝Ô­ýÇ[Í۬‡JHśA+HBéĐżäŹi·Ż–^ř´őźĄu?čýzŕ™ëŹď“ f&×;ůŔÓëť˝0éľI]ÝmżamT’Ü8#M«ć8$‘珜Ňţ­ý‘ŕóűO´ęů#§Ěç7Ü\Ŕ S«żKűO´j˙‰Vµú»Ŕ˘¨$€ÄHľo+6Hór 9×,íŰ!íţIŕëß÷¶ąe™ôß˙MĘśĚw0Ž’ÜćSj~uô´Z.^tź–‹—Uw2ĐäŰ‘‘¦Â\€ $€řł%ÝU=řoÝËŇßţu˙ç fJ˙ă` yŕ™ČŰ7n3şFo¸qě\ç¦ĘÔ5·«lfNXo’ µES4#+]ď~vQ‡?˝ 2WŽćĺfČ™ť.GFšZ/uéđ鋪;ٱR%¨0×Đr×d92ŇäĚNWëĄ.µ\Ľ¬ŹŰ.©®yŕUŘ [Ş–N›¤ů™ć5¶\Ľ¬S—µ˙D[ÔŠŤáî´0?K·OË »ÖŁg;#Vĺ8łÓußő©Mżm<S8 ůC¦ŰI·,ë­ ‰|ś=xîo^–Ęî' I0 ó3ÍۇO_Ňľţîkúţ›Ç#>çĚJ7WvŮps–N oćęČLëY:-[ĎnŽ’DŰĎ‘‘v=„ČÖŻŽ}1°pf§ë o’##­ßăÎět-ť6I»>:«Ăź^Ë~Ak §¨lfNżs.›™ŁĄÓ'éů#§ÂBĂ–j†0,ó €HFîĂ҇oKçN*Jú6m-)•m4y :{2°_ÝËýŹ·îc˝±+ŇÜł±7¨ycWď>3Ą?»§7Čy·&đ\ß¦ŞˇÇ+»?0mH lÜg¨ĘîôfÉś8NÓ1iß ă˘ˇk޵Nóö¸n B’dH¦ô$Ťmń˙y 6sý¸ÍŻşćĎĺďľ*Ă6Ae®Éš—kČ‘‘¦µES´ë?ĂWT[[4Ĺ G‚ű¶t\V^†Mór ­šë3;]߿ݥ-˙Ö°¶T=Ľd†8ě?ŢŞŹŰýjąxY…ąv­š›«YéÚ0ż@ţ®k:z®cČűEjL;/×0CˇŕµJR™+G ň3eŘRu_á=?ÄĄ€’’@‚’––ó¶ëjűŘľš¦Ł˝·żµCŞ~4rĐůąôäW"㞍©7}ݢ@P±ú!é{_ěh|ř¶tó{Ă sżeŇśE°cëľđľ'·, l˙ä=áçéx皥|W äX±Aúéý±…3Ő2}›·ŢľZşçˇŔq><0¶’«˝ÉHćŐľňĘ+zýő×ů©'$Ař»Ż 8Őe¸ [ŞęšŰő›†óaŹ=ס->[Ž ›ćgi—z’Â\Ce®@ĆáÓ“`0qŞăŠľ±đ&¶T­šë;ţÚ˘)fČńÓw›Ă*6ŽžëPc›Oß_:SŽ ›–NË6’XöŰňçłÍ•|"MG:v®S/=öXc›_ß^2Cór ć2l© ëńÎăńhÇŽ @Hq3h}nč_ěí=WĆöŐt~hĘŞëaĆ %e÷ǶjÍ-ËzĂ‘ęGĄŻNęýŞ~4đřě…ýC)Pm˛bC lĄ+đ<—{6‘¦cpĺ«“ˇďÂŕÇŰ·#°ýĆ[űžk?ĎÁÑߖş5p¬JWŕ<3'÷6´µ€ŕ‡eŚÁĆsC IhÜš8ó®Oď85Hď gvşY!í+šŕ2¶}§©ôťZş˘Nß`%4°8|:°™+GŁwJL°ňäđé {zř»Żéđ§äďľöÚ±ěě™m:Ě?5F>ßĂź] KôwđŕA’и†'ů¦ŘĽô}©ł=P…‘992ł'{§¨„V›·Ű·CŞy!üąšACŮýR~”@!¸ZNčąÜľ:Půá» ýôë­úřđ€Tó‹ŔyF[š·îĺŔ1‚šŽz¦lÝ8ŹÝ?î?=(Ôę‡Çn:^1Óůyďy®űA ±íOďOúovżßĎOü8’““Ł/Ţą|XűRI’xôżX[8eŔD’ľ]űqżÇ>nóG­–đ_í}Ľ0×0+2†Í *Ş´x÷ôE3ÔČ˰©Őß¶šÎ»ôT .­úúĂŮ/TëĄî¨ \YÂwpLąD˛›óÍúÓ?űła‡$T’IoP±ď…@ŕq˲@ß)P)±úˇŔׇúOSůŰżL…‰žHŇŮO®gV”}OľKzăőźÓt,đߨˏű?öáŔ~ł—ľAN¨?[řďľ(Űě{!ÜľÚrßř?ţéç§?É’$ĆÇm~ÍË5nHUC´Ş•`T×ŔÓPĽ—şÍŰórËŚa^ÇŚ8\?!Hü|ă˙ükÍý“?a €ëI`x’·Ikççđ  ܲěú´•Ż‹[–*1úö öăČś!ňg‘[ľŘg @#Z°2Đ9$Zuȇo_?·Yďl2{óŁźsĐě…C?`Ś#$I¬zcü ­—{÷Ď3ŇbIB§ű8łÓ‡®Ŕh!$€ˇ4 )**2o×Ű T1–Żćo^|ĐéWC„öąeYo@˛bC ©«ĆňnM`ąÝŕżë~ř ů14N=w2¶cýbăŔ˝J¤1]=âOťh޶ŰíüÔbČIâçčąN­-ś"IZîś|Ă’ˇLS‰4-(tc€ľ*†-U3˛Óuęâeů»Ż…M÷Ę~0ÚI v©Iu5™9˝Kç&ŘO$´r˘ęéŔ«•ľ÷çj“7v…L»É˝khe™ŕTźŕ5DsîzÇň`Ŕ3ĐW,Kß ÍzÇť_ä.V·‰_ \ Ć™ťn® s#ťę„ ň3Ün^Nďtścçˇ÷íKMa®]ß^2CĎ,ź+gvú°÷€Őm0}j>Ä ą’`Qvô•a‚Ę6„ďS0ł7,y·&ň>ÁfŻŁ%RĐS0ł··H¤¦°ˇ‚U!÷lŚüü-ˤĽřbŠ ,€$>~Ópެ†X5×aV” İĄĆ´˛Íp=;śŮéZ:}RÔ×/›[[/u›SbZý]úřz‘˛™9Q«Anź–¶ďp÷€…ÄZ–ݱDĎ<ľIż{ĺďthßßëźţçĎthßßëĐľż×ÎçźŇú{˙BŮY™ ”¤˘ąłô‹§Ó/ž~LEsg y˙Mßüş~ńôcÚôÍŻ3$cĚľ˝B2'÷.…Ű÷˙-ˤ˙ńv hđ]čť^:%ŇŞ.ßÚ1pUG"ü—>KgNîťôáŰO‹ .|űęţaKćdéëKýţűľ1]A’Ś-ţîkúŐŃÓfHR63G[ţ|–VÍu„ †-U ň3µáćmůóŮf@!IűŹ·Ćí|öźhUëőŠŽµ…Sú…$#M/™a†}—Ý Ţ7l©úĆÂiýÂŽ2WŽć‚ŽĐ)ECŮŻ®ąťo„$•űż{ĺďôÓ'6ëÎ’Ű"† EsgiÓ7ż®úőłZýĄ;-?fYYv-Y0_KĚWV–}XcľdÁüa…+{’«IkççŇűŠô˙s řżţ®÷ńsꄇ ľ mC};ŐUOKłz|Ř'†‚™ áĎî‘2'ŤŇOkN č NąeYŕ<Î5KżřëÁ÷?{2Đä[;_÷l”ţđv`ln_řoÓ±ŔňĆ€ĹB‰ž$#ŃŘć×3ď6ëÁ…7iFVşiZ5ǡUsŢŻőR·~uôtÜ«)~uô´‚l_ Usrĺőw˰Ą†Mm9|úBżľ)Ťm~í?ŢŞUs*Ě5ôĚňąćĘ4y†Í\Fřعΰpe(űŐť$ 0¶Bz’$źŐ_şS?Üô y˙ýcÔ[G†ă˝Óň§OÍםwܦew,QvV¦ą}ÍďßbX’)SzK§[ląc˙ŠšŽJo‘îy¨wIßĚÉRćÂŢ`äÝš@uEߪ‰—ľX!fő·Â+.>|;44•v6‚–‚™7>©˙öé§Ă+Zę^Ž|îŃĽ±+pžë~š őX7wB–y›&­ $;Zý]zćpł–N꤅S2ěrě\§ęšŰ¶$nËĹËÚňoMúúÍSµ ?SŽŚ43 3żi8ŻŁç:"îż˙D«>n÷kĂÍSĺȰ…-ěᆭş“íý*OF˛’ ž–,śo†ť>}˙©m:rôŁ~ŰŃGŞůý[Z˛pľžy|“˛2íúá¦uäčGúôĚ9rzô˙a¬äĺĺőţc/d5‘1­óó@ŐÝ×§Ě^x˝Zâčŕa@pż‚™Rţ¬Ţ%ć§ŞŐ#_ ˘äÉŻDîĂďŰt4°ÁLÉž3đ”šÁ^çÉë×q˲ŘĆa 9źŇŠ8ťN~jAH2Ćţ4P•á0Ň”—a“a› YuŞăŠüÝWc E~~äÔ Ű4¶ůőíÚŹŁ>ďᆭŹž–$ćrijőwÉß}-¦Š•Ć6ż¶ü[“yÁý;˙ˇî7’kl €Äz˛ł2őxHĺČ·ýqXŐH$GŽ~¤ď?µMżřÉc’¤˙z˙}zęą_2 ‰e#§Ó©––IRó„\ą®¶ŤŻ«Î¶gO&ľBd(ç˘8ťKßŔg­\ ­hIĆ–V—ąlîŃüG¨Ć6ż4Ějó:†¸˙p÷€…$9,/ąMÓ®ŻRóë—;h8täčGz˙صxÁtçKžËÎĘÔĽ9ď÷Źý1ě±% ćK’Ľs$¦× 
îW4w–˛2í:rě#ť9덹beúÔ|-^đÝT0Eť>5˙DźhÖĹŽÎ!6XĎ9–k-š;K™™†:;ý1Ź=Ćy@b˝%Ăľ”4F Ł*´r)´˘ $`dIĆż˙ň—_6ożňężiß_˝üeeÚű…sgšŐ%%÷üďúÎôµ5w‡móŤ kuäŘGÚţË]Qő÷ţ…ţęţű"6‹}ëĐ{zjŰ‹QŽéSóőť7čÎ’Űú=wúĚ9m{q—Ţ:ô^Üö =çľ+Ň|cĂZť>sNŹ>µ˝ßµnúć×µxÁôţ±?2Ý& Ä´ŠMč˙Ň 5Śšć ˝Ő#LŻA"±ş ŔŞXÝfüš>5ß\=ĺŔ;G­ŞčëČŃŹôÖˇ÷ÜďńMękkîVG§OďűŁŢ?öGutú$IKĚ×ÓŹ'â~Ď<ľI›ľůuegešűî{퀏*ăď,ąM˙ôëg#®ţR4w–Şţ#3äh<~Rű^; ďQG§OÓ¦ćë™Ç7ő[…g¸ű}çÁ f8ĽÖÓgĎ~N¦ćëoň–GNr1U„6ĆôĄLdÔíĂ·óű­·b)´’ HTH"QI°nHB%ÉřrÓÔŢé牚ŢqĎ—îÔľ×hű‹»Ě %;+SOlţ¦–ݱDÓ¦ćkő—î [ çÎ’ŰĚâŔ;GôŁç~¬ţŇťúÎ+él~P•?öš?Üü ¬lqWرłł2ő‹ź<¦Âą3őĂMęÍ€g¸űÍťń|ßüMÝs×2egeęž»–éöü ß|I*¦ ’˘˘"óvó„F-ŃžüĘŔMW-$´b‰_έ¤o%ÉÁ€eB’ľ•$^Ż—Áٞ2{˙¬ę·Ćă'őTźŔŕbG§~ŇÔuŢś™aAD°iěéłçő7?z®_Qóű·´ýĹ]f(:}çÎ’ŰĚŞ’W^ý×~K_ěčÔŁOm3ďßsײí*Úůnq—9ľwŢqßxIlČ$­!K®‰Z±^ĎhĽ IDATÄż-s˙džYE"I *--e`IoÚôéšű'˘˙üđCIÄçó10cTčô”*HŠćÎŇwľąaŔcíŻ}»_¨ I˙\y‘‡‹ťęčô)+Óv…sgšÓP¶ýňE}˝šßżĄ˙z˙}šV0EwŢq›Y• ::}Q{Ş|zćś¶ż¸Kźž9§ĎÎśŃ~a×úZôkm<~R‹|oş$S@ú—{ď„LůRŇdďébôpˇKˇ•L@˘ůŹ˙Đ?î~ĹĽďt:µnÝ:` ˙řĘ?á$•””Čĺr©ľľžÁBC‘% ćëČŃŹ"n—•e7Wd‰&Ú //‘Bаd°Uc>>~RÓ ¦+ćHľ*ÁcÔĄď4—áîëµÂl±nşÔoCÚTąŻ´0zH¸Ć‰SÍŰL±A˘E Gy䪗–đŹŻüCXeII }¸Ć¸X§Ő|vćĽ~ýňoű=~SÁ”SMF⦂޾(5Ťm8ţ‰–ݱ$¬ńihX2ÁýF2Ő(QÓ”0~Ä™I˝­€€ Wo+ű Ę‡T$áŔĘGƧĐi"ËîX˘_íúMÄí>=s.âsKÎŹ{@¬äŞě¬L]ěčTJJŠyÎCÝ_’. q% Tj¬›·Ňň9$\hV¦× ‘GVF82~}z朼s$đďĺął†NÄőßđC¦Úd6Xm\x¨×śÎ3ĆăWĚIčÔ›‘CÂ…V„t@<ެŚpdü{ëťŢ÷ď;n¸áçóŮŮŢŞ–Đ~$?c^>tjKđvf¦1ŕľßذVżxú1­ţŇť#Ús@b·Űĺt:#~xţ#H4Â‘Č [Şćĺ1}¶Ô„ť‡ĂH3_g8‚ű:Ś´¸7Ţç 7 áHr¨ůý[fŐĹť%·éŻîż/ć}ß˙UYţżŢ ’u÷ţEÔíŠćÎ2Ľľy¨÷űđȱŹĚç—,ŚŢXvÝ_~YKĚ—űú5 w? ”m(‡ö!iH+Pq÷YF Ŕ9ĺĺĺ1(+‘čśŮézxÉŚ·oąxYo6®Ă§/Äő<–Ţ”­Us‹ß®ýxČűűú5ě?ŢŞý'ZăvÜxź'Ü„#Éĺѧ¶içóO)+Ó®olX«% çkű/wEťî˛ěŽ%úÚš»ĂV¶‰W҆ăźhßktĎ]ËtĎ]Ëôϵú­®“ť•©n~ĐĽş”đľ×čŻîżOY™výŐý÷E\•ćÖš=G‚űw?`ŘIqq±^ýuI’gâ Uř˙Ŕ"!BűŹ0˝„#c›3;]n.ĐĽÜ íúO‚sëG’ϧgÎé[ŹţXO?±IÓ ¦hÉ‚ůÚůüS:}朎ýH§ŻO{ Vm„®ÓŃéÓŻ_ţí€ËßŐöwiyÉmĘĘ´ë?yL˙°ç_Ě©@Ó§ćëŻîżOÓ®÷ yĺŐ P.vtjű‹»ôĂMjÉ‚ůúŰźü@»_ýWłië=ĺËtĎőé1ű^ë _†»0ě€$tšC‹Í!ď„Lĺ]ĄK0âĎ3±÷Ż×4háČŤÓ·ú"Ča¤)/æy9†ĘfćȰĄjé´I:ŐqEu'ŰÇô5}Üî×ţă­ĽąGG’JĂńOTůđăúÎĚ•i¦MÍ×=_ŠÜ´´ŁÓ§7˝§˙ůňo‡ĽbĚ`.vtę[ŹţXŹoţ¦ çÎÔ×ÖÜ­Ż­ą»ßvż~ů·Wשůý[f%HŃÜYz|ó7űmsŕť#Úţ⮸ě ) ÉËËÓ˘E‹ôÁ\˙ëTążžQD\5OČ kěv»ÄáHü´ú»ÔęďRc›_ÇÎwęonče®Éc> iló«±ÍĎ›ŔrG’ßĹŽN=őÜ/Í ŽŻ”‚’Âą3żŹźÔ§gÎÉó‡?ęÍCďő›‚ôŮ™óúőËż5oGóϵtäŘGaŤYŽ˘˙ăáękkîÖĽ935}jľ çÎTăń“j8ţIXuG$˙°ç_ôÖˇ÷ô•»–iÉ‚ůşię}vćĽ>=s.â´ťáî7Ňkh äIđĂj0 98q6 âî`úś°_Ü|xáČŘÖrń˛Nu\ÖŚ¬t92Ňä0ŇÔęďb`€p70(©ůý[Şůý[ĂÚ˙Ó3ç"VvôËńG2u'ÖóÉ~#˝ÖáŽ1Ʀa$;wî üŁi6H€&:þ߀‘"IĽŁg;5#+]’”—a3’`ł×w?»¨ĂźFnâştú$Ý~S¶$éů#§˘ľ†3;]wĎÉ•3;Ä´^ęŇáÓUw˛]ţîk1źë`ŻWćĘŃ‚üL†¬JÓrń˛Ţ=}Q‡O_ôµFšVÍÉŐŚ¬t9łÓĺᆭĆ6źŢlţ|ĐĘ•ŕkç692ŇÔŘćש‹—U×ňyÄĐÉ™ť®ű §×˛tú$­-ś"Ă–ŞŁç:bzM„#€a$v»=lšÍkéĹZď;ÂH".š'äĘ;!Đ4Ę0 ŽŚÎěô°0!(2|<Ŕ‡tGş-,ŚjlľĽĽ##M«ć8TćĘŃóGN…˝î€F”×3l©zxÉŚ°k ˝>gvşVÍu řZ ół´áć‚°ĺŹ [Şćgia~–v}t6bPäĚN×ýó ú˝va®ˇÂ\CK§OŇţă­ŞknďwÎÁké;F ółôqű%ŔâG v¶áěTZZj$Lt njŤŢkG0R„#ŁĂ°ĄjŢőéţîkCŞćŐ†ůňw_ÓoĎ«±Í/»-U ¦djŐ\‡llů·¦˝öŞą3 ŘĽUÇÎwŞĺâe9łÓĂ^ëľÂ)Q+]ľ±đ&ó+ţîkzćÝć°çŰüň_˝f…ą†‡<ż¶hŠ9~}«SŽžëĐŃszpá4-ČĎÔŞ9ą:z¶#bdŘRőÓw›c®¤I&GţăßŐtâ8?H*7M›®›oą…pĆz@ I:$I:”1WĹ$™ésäOťřĐĺp¨¸¸8aŻőÚď~Ç€Ź˙ńďďęŰ›żK8rÍË5´*ĹţA|BŞf\Ż´ťŞŇz©{DĹ@ęN¶GüŕřÓ ZzS¶ćĺ–ÉëCŠĐé1aŻőŮEµ^îV«żKţ®«·Ů˘5bpRw˛Ý H齿zťŮéf0ł˙xkÔpăźĎkA~¦iZ:}RÄ ęđé – G$…}’ÉW×­×’?ýÓ!ďG8Ă3쀤˘˘˘7 IźŁ ˙1šµbDjŚćíŇŇҸßétŞĄĄ…G>;}zč”Gâ*Řc0‡O_Đo·ĽÝ±óŃż4¶ů5/×aK•3;}Ř!ÁÇ׏ ,Žžë4§«HĄŤ˛:Ď©^ŰHë `i˝Żß˝WH«żK­—şĺȰiFÖÄČçß~ÉRß›.—KŤŤŤü"©µ¶=ô%€áv@’——§ÂÂBó'{ŤŞęx‡ŰLźÖśµĽĽ<îŻńĐCéŕÁ ö8PSS3¬ýGđŹóKÝQ—ě 6˙ öęH¤Ž,D«ţĹţ­z870Íoé´IZ:m’ąͱs>5¶ű]ľŘ{©{Đ×qfő6b ;îžăĐÝs˘ďĽ¶ĽP%콲ŘŇĘš2eŠ|>?¨H*őőőĂ˙G`dl#Ůą˘˘B?űŮĎ$QE‚‘©Í(2o———'äm^^ž***ěq`8 áHbţôB¦Í$‚a›0ě}ŰüzţČ)­šă0›Î†®@Üć·ŤçŁ6C )FďŻáX*u$™Ó›¬În·'$LĆ‚á$„#0r# HŠ‹‹©"ÁŐŰ Ôb ô8HTő’á‚üÝWGöˇ¤ÍŻĆ¶SriZź©ÂC ň3Íç s =ĽdĆ–ř|{›­F[C8ńaéB«HĎV—y®e3s"Vs”ąrĚ0çđgă2>ţîkćňČŽŚ4}cá´~,…ą†ÖN‰ËuH„#¶xhýúőzę©§$I iSĺ™č”űJ #Ś|)iÚ›ŃŰ{¤ĽĽśęÄ„pd|©kn7›ś®-š˘˛™“ĺőw˰ĄĘ™ťĎ›@4§:.›Kď6¶ůÍý͡ńü°§×ý¦ńĽ^2C†-U/™ˇ–‹—Ícć69®O«9ŐqYGĎvÄmŚŽžëP]s»Ę\`fKé,óµű^çóGNŤř:Śo„#8©ń:ËĺŇĘ•+Íű»íKäKIc„ŃîĚŰäO 
”‰;ŞG‘ń§±ÍŻ_ýĚśFâČHSa®!gvş>ľľ¬î©«ţü˝S:|ú‚$™űÊ珜ŇáO/Śř\[.ެ$qf§«0×Pa®a†#uÍíúů{ń)~Óp^ż:ú™yÜŕkŻ38VńjB `|"€Ä˛Ĺó`:tčü~żĽ2U›Q¬ ˙e„©·čPúó>żŘ Â‘Ń 5ľ]űqÜŽwô\‡Žžë3;]3˛ÓĺﺦĆ6_XČéőöźhŐţ˝}EvýçYý¦áĽ sďyKÇĺW}‰v }ŹŞĺâeýüČ)9łÓeŘRÍé.­ţ.5¶ů‡|ĽXÎ'lśŢěĂHS^†M3˛ÓuęâĺW·‰÷{`ě"€Ä‹k@b·ŰµnÝ:íÜąSR aké•Ę»ÚÉHĂ´3ëóö˘E‹T\ĚĘGáHrhąxyÄţîk:z®cTÎU’%I¤V×€ ë!€Ń‘ďömŘúBÖeö·š«†ˇőë×3(áŔĘG`ô¤&â ˇz[l˝b_ÂHCő¶ŐŘ÷+**hĚŠÁ˙aH8°¨o €Q”€ÄĺriÝşućý׍by&:m óĄ¤iGö2ó~aaˇĘËËÄŚp`5—.]2oŽ@âĄ&ęŔĺĺĺZ´h‘yż:s©9µÖł#{™ąjŤaz衇ÄŚp`e„#0:RyđŞŞ*9I’?u"ýH,jŻq«Ҧš÷7nÜČ]ÄŚp`e„#0zŘíö°Jú‘XOßľ#«WŻfŐ Ęétš˙%XIQQ‘y›pF—-Ń/ěG˛{÷nI~$®«m*˝|‚ŃOr‘úŽTTT00Ô#Ź<˘C‡©¤¤„p`)ĹĹĹúîwż+Ż×«ŇŇRF‘m4^¤ĽĽ\őőőúŕ$I»íKäęn—ëjď@’ňĄ¤éŮI+é;‚a±Űí4ńXŐ¶pc¤ŽÖ UUU™eóţÔ‰zvŇJ5OČĺHBÁp¤Ĺć0cš`,µ€$ŘŹÄ0 I˝! +Ű$źťYw„…#•••rą\ `ĚJÍËËËÓ#Ź<’ĽőEůRŇx'’DućRy&:Íű•••ĚźŚy©Łý‚.—+,$i±9ô줕„$I :s©eĚ5ďŻ^˝šp0.¤Ţ ®lDH2ţŐf……#%%%¬X7RoÔ —––Ş˛˛ŇĽOH2~Ug.ŐîĚŰĚű%%%ŞŞŞb`ăFęŤ|qB’ńŻď´šÂÂBÂŔ¸“zŁO RHňÔäU,<ĆůRŇú…#%%%úŢ÷ľÇŕĆťÔ±p}Cď„L=;i%!ÉĺKIÓł“Vö G¨ŚW©cĺDJKKőÝď~7l `B’±'Ž´Řćc+W®$Śk©cédŠ‹‹Ă–ö§NÔS9w«6Łwj ¨·豜ż G*++µ~ýz0®ĄŽµrą\a!‰$íÎĽM;˛—ŃĽőŞÍ(ŇĎ&—Ëź:Ń|¬˛˛RĄĄĄ `ÜK‹'ĺrąôÄOČétšŹy&:iŢzřRŇ´#{YŘ2ľ†ač»ßý.á i¤ŽŐËËËÓO<ˇ•+WšŹy'd2ĺf5OČŐS“WÉ3±7¨*,,ÔŹüc3@€¤aë'¸~ýz«şşZ~ż_R`ĘMó„\­ó‘˝§‹w1jě ´×¸5챕+WŇo”RÇĂIşÝî~SneĚŐc9©Césxă¨ŢV Mţ‹°pÄ0 mܸ‘p´RÇˉ§Ü”””ŹůS'Ş:ë–Ž_JšvŰ—čg“ËĂV©q:ťzâ‰'äv»$@ҲŤ·®ŞŞ’ŰíÖ+ŻĽ˘ÖÖVIRCÚT=•s·*üĐJ˙™v3Dž‰NUg. [ˇĆ0 •——«˘˘‚$=ŰxÁ»;z[jě·Ş!mjŘă………zŕ”——Ç ,Á6^OÜn·«˘˘Â¬&ill”Xé¦:ëí5”D-q8Zż~=Ói–cďŕrąô˝ď}OÔîݻ͕nJú‹ŚHŇęŐ«U^^.»ÝÎOŔrlÉr!ĄĄĄr»ÝŞ­­UmmmÄ ¤ôĘ Ëő(ńĄ¤é‰N˝–QÖ|5¨¤¤DL§â §ýś®Ą¤0@Ľ~¦ş.3`ÔŘ’éb‚ÓnĘËË#%{Ť[őZz‘ÜWZ_]§’öŤmž«ÚŚ"y&:ĂšŻŚń×íyťAĆ)[2^Tß äŕÁćŠ7ţÔ‰:”1W‡2ćʸvEĄ—O¨äň ą®¶Ťűë¬ZÄ0 ąÝn‚ ަL™bö@Řßí‰dKć‹ %:xđ öîÝk%R ,©5ŠUk+ďjçőŞ’uť7×ŘV OšSőiůC)Đ|µ´´”#@”——K’Îź?Ď` ˛xńbą\.$”Í*ZZZŞŇŇR577ëŕÁňx544 ůS'Ş!uŞ”eăÖ¸žgpęLQQ‘Š‹‹i¶ @D`·Űű&ÍÍÍjii‘ĎçS}}˝ü~˙€ÁÉH9ĺĺ婸¸Xv»]N§SS¦L! Hb`·ŰU\\l6;­¨¨0źóz˝:ţĽĽ^ŻĽ^oż}ëëëű=ćrąúM…qą\2 Cv»ťNýŚ2’ĘË˰Ş#4LcS*C¬Ž€XSlÄlrćD•~ˇ€t¨ IÇ0Ś!mO@`@ĹĹĹr:ť €qĂ0 •——iź”žžž†ndnř9ńF0Lô úúzíرC~ż˙†ź‹azâ‰'\rDĆ›hhhá$ůý~y<ކ ’8ÉÉÉQNnn{[›ÚŰŰ%I>źŹ7€a ‰“%úgşëË_ő×}íwżÓëŻýž7€`Š °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$ŔňH€ĺË# –G@,Ź€X °<`y$Ŕňl Ć›¦¦&}ňÉ'’¤YłfiöěŮ `D¨ Ŕ€rss•’’˘9sć şí–-[”’’b~µ··şOuuuŘ>Ń455ióćÍĘÍÍŐś9sTVV¦˛˛2Í™3GsćĚŃÖ­[|˝˛˛˛°×‰ôµbĹ mßľ=¦ó$DU]]m†MMMŞ®®Ňţ{öě‰Ű6‹/Ö¶mŰÔŢޮɓ'kůňĺZľ|ą-Z¤¦¦&mٲE‹/–ÇăöőÖŐŐiÓ¦MZ±b…šššř ! 
@TÁ𢲲R’´sçÎaíM{{»^}őŐ·©««Ó}÷ݧööv-Z´Hďż˙ľÚŰŰUWW§şş:y<˝ńĆfP˛bĹŠC’'ź|R===ýľÚÚÚôŇK/I’<ŹxŕľŔBHQSS“^l۶MR ¬Ą˛bѢEšC‘““ŁŞŞ*=÷ÜsCşV@r @DÁé4÷Ţ{ŻrrrĚ#– $''GkÖ¬‘4pIđąŕ¶‘Ρ©©I“'OôusrrĚsöx<1M݉¤ŞŞĘĽM@ÖA@€‚Ói‚áE08ušÍ`IpzÍ˝÷ŢušŕkUUUĹ´RŤŰí6śá$k" @?Áé%“'O6‘5kÖhňäÉjooŹ©YkpűhÓl‚ÇV="Éě%RVVóąŹ7Ô~)AˇÁJ¤é<€äD@€~˘…Á°dűöí1g *’ľ*}ŐŐŐ™·srrb>÷X*M˘Ůąs§6oŢ,©wjŔH¦˝˝Ý /6mÚö\đľÇă‰i9ÝhISS“<OĚ!ÄP*HúľN_;wîÔŠ+Âľrss•’’˘ŞŞ*µ··kÖ¬YC^Ň0ľ L0Ě5kVż)&łgĎÖ˘E‹$ĹÖ¬5tšMhX1XsÖľB«I:ť'R5ISS“ąDpđ+¸Ď¬Yłôä“OĘăńP=cc*8}ć“O>QJJJÔí‚˝E Ö¬YŁť;wjĎž=fĘÎť;5yňä’ĐŞ‘– îk°Ę–ĘĘʰ•j¤@2’©9¬-Ő IDAT€ńŹ bť:#B‹XVŠéŰ458˝fÍš5†+łfÍ’4´ ’ŕů/_ľ<âółgĎVYYYŘက¦`ߍĺË—«§§'ęWeeĄ$iëÖ­38ÍĆăń¨©©iHÓkB+N"U‘ÔŐŐ…:ŹGŻľújĚÇ €¦`•Gß)(}Ç`?ŹÁ·Ż««‹izMPUU•ą´đ<ĐďůÍ›7kńâĹZĽx±<ŹąÍ¬Yłú5` $¨ Vi ^¬YłĆśţËj/Áăm߾ݜ^‹śśóř{öěŃŠ+Â*I^zé%UVVĘăń!I¬ç@(Hę]Y&ÖĄwU&Ѧż„ ťfĽ«5kÖ襗^’¨@ÉÍÍŐŠ+´yóf˝úę«úüóĎö§á*`8H ¦¦&łwÇ`Ók‚B·JI¬ÓkúľÖű￯{ď˝WR (ٶm›¶lŮb;•••ş÷Ţ{ŐÔԤŋkëÖ­aK 0–ů€rrrôĆoH _^w łgĎ6÷ VśTUU©¬¬,bʶmŰTUUń9·Űm+·Ű­={ö¨©©Éě}’““#·Ű-·Űm7¸śđ–-[´|ůrłšd۶mjoo§şQJOOOĂ0<{÷îUMMŤ$iĺ]_Ň]_ţň¨źĂkżűť^í÷’¤Ő«W«˘˘‚7@Ş–X«a0¸ÚÚÚ—€€Ín·«¤¤Dn·;ć}¨ @R"‰Ż×«Ý»w3Ć•ććć!$ô 0 óçĎ3ĆŻ×;¤í© ł)ůů*[ą’0fýă+Ż k?1KOO—Ë5“tb,Ź€X °<`y$ŔňXĹ`UWWë“O>QeeĄfĎžť×TWW§7ß|SłfÍRUUo2`\˘‚`UWWkË–-jjjJškŞ««Ó–-[T]]Í ·¨ €ňx9äńňx<ÚĽył*++UVV¦­[·šýKfĎž­ŞŞ*ó¸Ű·oמ={TWW§śś­Ył&ę9¶··kóćÍňx<ňx<ĘÉÉ1Ď1Ň8´··kűöíŞ®®6{Âää䨬¬LĎ=÷\ż&şCÝ^’šššĚ1 í;|o׬YqŚ‚cŇÔÔdľĆ“O>)ŹÇŁť;wŞ˛˛˛_Cܡ^?€€bÂě[•Z0“Ɖî?HWălÚ´IŰ·o—$-_ľÜü  +^z饰˛ŹGoľů¦ÚŰŰĂŽóŔ!Á¬Ył4{öl˝˙ţűşďľű´fÍŐŐŐ©§§§_`ŃÓÓcî;yňd-_ľ\oľů¦ůúoĽńFżÇ›7o6–E‹iůňĺć‡ę={öčŤ7ŢčŢl۶M›7o;?ŹÇŁűţöî?¶­ëŔý׹S¤ä°M)¤ôĺóů|š‘3Ĺčďď‡ÓéÄŘŘ,ßď0=‚BÜW"‘@<—ÓŐ#eD¨°oß>¤Ói´··ËzÇă€ÍfCWW—ćp8Śx<·Ű-ďG]çétZSwĹľ_„^ …rŽńů|ň>őA”a†‡‡eűŚŤŤÁétš®a#ÚĚď÷Ëk‰:÷źď9"""$DDDDs HČé)j@>źííí3né+:ÖíííšŃv»]Ž Č' j¦jŘívyÎS§NvÚ»şşr¦wtttŔçó!‘Hh¦ňżë§uňŮlłź^ŤF5SSÔ×Őß—ÇăŃŚÜP“H$ĐÔÔ¤9žĘ"Â'qďęă˝^oÎtQę6-öý"äp:ť9Á‰ÝnG0Ěąu`ŤF5çsą\¦Ű(‹mŁ›šš‡5ĺóx<2)$¬#"b@BDDDDE{1ŐD?’"Ť˘ŁŁĂpm 5bčה̾Żôڦ˝),ůÎ) őH…|ĺłŰí3–ĎŚŞŁ?_ľűďW ˘¬3ÝS"‘#cÄyz{{ĺZ,ęk¶Ó_·Đ÷{<ąľKľgGM܇Óé4mSýÔőqfë™sĺ+q """˘Ysą\hoo— ‰FŁQą¨×ë…Ďç3]đTßą5 5ň}ż AM=m¦ąąŮđ\â=.\ČůžYĐ3S”Żţň1şoŁcD€ÓÝÝŤŢŢŢĽçŚÇăđz˝čęęÂéÓ§ŃŃŃ!,uŰ©ű~}ůâń¸śúcTg!_˝¸\®śQA˘­z{{ŃßßoxśÝnG:ťF"‘u{-w Hć ««KNăčďďG:ťF8–S‚Áŕ¬vzŃwnç:Ś1[ËBxőűĚ:ÖĹ.ŇşP÷]ČZ(ú‘'áp]]]¸pႜZÔŐŐ—Ë…ÎÎNͨŚbߍFqŕŔś0ÄívË`­Xf;ĺz˙" ""˘\śbCDDD4G---FŁHĄRD"đűýp:ťráNłµ# f»Şžč{<(Š2ăő1úpÁ,tř" ĎxOú5T‚Á ‰ĆĆĆĐÓÓ#G‚$ ěŰ·OsoĹĽ_L»kôôô```Š˘ Źç¬K2—gA¬#ÎźďŹXű„Í›t:ŤS§NÉN±^#¶˘ťç|»‡ŘívŮÁ5{ß|$bD†îQČ=©;ŕfĺŻňÍ–zű\3§NťŇLMI$ší–].€ şDŔ%FĐű~±XlSSb±€&l2*«b´ ´púôéśď‰`Ąű'"˘\ Hf)ĂëőjvG1ę¸ÎDüVßhĘE"‘q]ŤBy<ą#Ž~·uYĽ^Żf¤6bTľt:=oĺ›-<č·Ěb±Ľ^/Ľ^Ż Ä}ŐÝnĎ™ĘRěűŐ»Ţ=;Fí#ÂŁëŕÍěţͦě¨ďź‹´™c@BDDD4Kę_ôťSő÷ĚvQwľm6b±š››ĺŽţţ~ěÜąs^Ë,:ŢÝÝÝ9eV/rŞH:::důÔ[ŦÓiěŰ·Ďt´Ăb p:ťH$hmmŐ”'‹ˇµµŔô6ĘbŤ:ôŃŻÇŇÝÝ-G[đˇŘ÷‹˙ööönĺ«•ÔĺSo>Ś Ź#ŹŁ»»Ű4+ôţý~?h%"b@BDDD4˙\.zzzd'uĹŠhnnFmm-š››‘N§á÷űgÜ×n·#‹ÁívËßö×ÖÖ˘ĄĄN§3ď)ĹjiiA(B:ťÖ”ą˛˛R†"===š‘.— áp6› ]]]X±bvî܉ĘĘJ  ˝˝ý m»ÝŽh4 §Ó‰h4ŠĘĘJěÜąSÓMMMšQ@~żétZŢżQ=@ˇŘ÷‹Đ+‘H`çÎťhnn–ďďčč@{{»ÍŁžăőz‰Dd]ďÜą;wîD0„ĎçŰížŐý»ÝnÓQCDD4Ť»ŘÍA €ÇăAWW‰b± 5@Îč1íFżC‹ÇăA,C,“k„x<´´´ «« ýýý9żýÓ1Ě˝ŢŃŃ!Ď+ĘÜÔÔ—Ë%·Ż5 VÄâ˘ńx©T >źOŽxÇăO)˛Űíhjj2}ľűrą\˛¬úú‹Çă‡ĂFŁÇăň:@Ŕ0¤S¤˘Ń¨¬·Ű źĎ‡`0SľbŢo·Ű‘H$䢮úçÂëőÂĺrɲŞ)Q×âY°ŰíňYP/´›ďţŨ•|÷ODDZ+±D9íţáđţűďľ¶çIěů˙aŃËpâ˙˙tâ8ŕOţäOđŤo| C´ţúŻ˙ŁŁŁ€UŢ?ĹĘ Ź°RS˙űWřýŮ–ÄĎĐx<—Ëeş­m0Dww7Bˇáî'´|ś:u*o@´sçNÄăq ,ęv˝ü1ţćoţਮƷţÓ˙ÉĆ"˘%«óŻ˙ů÷żýŰż-ř8N±!"""ú‚ATVV.Ü™N§Ńßß đE_éÁĺőz±bĹ ĂĹTĹz$|"""˘/†sŕŔôööĘE6űűűŃÜÜŚD"·Ű=ăbŻôŕëÍ´¶¶Ę`LěÔÜÜ @»Ř,Í®ABDDDô Çăčíí5\+ÂívŽ.ˇĺG¬sŹÇ 1őş/DD4ż-‘Žq ČY¤ŐëőräH ±Űír±U±¸+ůpj ŃÂa@BDDD´Dx˝ŢE]x“–.î:CD´ř¸ •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$D´¤]ż~'OžÄőë×YDDT?÷^}őUA|đÁ¬"˘EÄ€„–´ŁGŹâç?˙9ţëýݏxń"+„–µx<Žëׯ#›Í˘··—! Ń"b@BDKÚřř8 ›Íâ?řC""ZÖnŢĽ©ůš! Ńâa@BD †$DDTŠ’-$Dô@aHBDDĄ! ŃÂc@BDŚňňrÓ!É÷ľ÷=~P$"˘eí+_ý*6nÚ$żfHBD´°ŃăĎţây’đ"-wËCřłżxž! 
Ń"a@BDŚM›7㿼ň*?(QɰX, I "âE2µwKön©B]Ą…•QbŞ,«dűWYV±BřłŹhŮc@BDü H¦öÖVaom¶Ú”šµĺe˛ý×–—±BřłŹhŮc@BDü HDDÄź}DD%Ź -»Šáp•C4G×oMáŘo'pě·¸~kŠB´Äö1$!"š;$D´ě>(ţó?˙3C˘9šČŢÁ±± ›ŔDö+„h‰˙ěcHBD4w HhY|P|ńŔ˙Ť]üÇň{Ą’XĘf÷Ďx•eŐ¬Ź]ňSî…*ĂlŹ«¶®^ÔăćÚ~ ąŕŞĄlĺ¬ĘµĐĎ ŃrýŮÇ„h~qŐ5"Z6ţăźţ'ŔGżţ5€éŔ˛şOoŤŹ­XłłĚh*‹äämôŤ^3=®®Ň‚¦v¬_Łůţřämśşř/gfŐ±ő>bÇcëÖtřGSYüćęç]LĎ*8h­[8ňQu•|˝¶*ç~#Ł×0>y°cý4ŐŘ4ď‰}šĆ±± d§îšmukQm]ŤŞňéĐ`âÖŚOŢĆ˙KÉsÝóŢÚ*ěŘđ°ą–đXĘVb˙.‡aT•ŻBă¦écŹ|”Ô´Ąl%^n¬É 7f:n¦¶S·ź¨k˝§¶oĐ„ę:޿˾Ńk˛ľŐőˇn}=‰ďo­´ŕ?×oČ©śm­´ŕČGÉśöŻ«´ŕą›rÎ[m]Ťjëj8¬_ÂÚňUŘZiÁůT–˙°1$!"b@BDÄDŐŃUuDcÓş<‰ńÉۨ˛¬ÂŽőŁ­nŞ­«ńÜŽŤxsđ˘ě”ZĘVĘöůTďŽ\‘#Ş,«đÍşuxlýôĆÍý¶^–˛•ČNÝű± śąú9&˛wPm]Ť¦7U`Çú5°ěřâĐ{N޸ŤŘĹĎpćĘ XVýĽ56xkě°”­”ŃůTÇĆ&0šĘ˘Ę˛ O×oŔÖ{!ŤţžÔĘÄ­)ô}r g®ŢĐ„ Ž5«Ńöč:$oÜƨŞŢöčtg§î˘oôšćĽęĐĺąŃń« šăŞĘW™÷TýXĘVâ?×oĐ„TU–U˛¬É·Ń÷É5MyÔ×lÜTS?Ó,ŢGě2QŻ~fň…/3i«[7ÝţżťŔo®}ŽńÉۨ«´ČŃ♊}šÖÜ“GD}śąrŮ©»Ř±~ Ú]—č|QnŢĽ‰ńńqţŁJ‹ęúőë IĐ\˝˙ţűx˙ý÷YD&!ÉĹ‹ńŇK/ᡇzŕîÇű]ţÖľoôš¦Ă9‘˝Ř§i$ďM•¨*_…˝[ŞĐ÷Éôt›îO©ůń™ËšßćOdďŕÇg.Łă«.T•—ˇqٵ €ä©íäo˙őŁĆ'o㽳W0‘ťÂŢ-ÓScv¬_#Cbd§îâ­_ßťş‹ľO®aÇú5¨*/“áČ[ŞFÜÓ›M[¦;䫵?ęÄ´‘‰[SxsđSM}ŚOŢĆ›ńâ.¶VZđÔö š CtÜcź¦sęiđRU«Ë°wKŐôHË*Dz\µuµć8oŤMľO]ęcłwî⹏­{X¶…Ň#‚1uŰëź™ąřə˚Đf4•Ĺ[żN˘ă«.XĘVbÇş‡5ĎëŢÚJÓgçĚŐMÝÄËŤŹ Şü‹ýrýúuĽúę«üÇ” I掫˘ѲIÔ ·ŽŹŹă?řnŢĽůŔÝKĂF«ěÔ«;›jŁ©¬śŽŕ­±ľÇlaĐźśąŚ#%5Aé‡đ˛•r“ÁËÓµ:bÓ¸·=ě×k+guß—3†ÓsÔaKěâg†ÁJňĆtąÔë’ěXżFÖAß'×L§ţµ\ŞĘWˇqsîh‡őK¦÷|äŁ$^>ő[ĂőDŠ=.yăß»FěbÚ´¬fÁÓŽ kda¶ú™™ ýMýß{.şgN„EfĎŽ‘ôE»ví˙Ą/\±>n%"šŽ yŔmůĂ?ÄG˙ň!Ňé4+–µŻ|ő«łI¶üáVüýϦ I´‘$˘S?ÓčŽŘĹϰő^ PWiÁh*«éŔ>·c/e0šž^DU0 9f Y†.OšľO,Ô)FFĚĆůÔ-ăsßQŤú¸q{Ć÷Č€bÍ—Tá€yP¦®ő”ó©,¶Ţł—C—'1šÎĘP#;u×00íq3µ·ĄlĄlo˝­örMbć̵ĎMĎ1“3W>7ŻĂ·±µŇ˘YgDVĺ{v/eć4őgľ•——k:śD‹ˇŞŞ Ű˙Ýż+ú8Ž$!"b@RŇÉ_˝zA”Ç®/ŘD.LÜžĘűŢěÔď︶¬RÓťđÁË4nŞ‹gz±ßë”ßÄůÔ-Ó‘FÔ#®ßš*üĂľjęHˇÔ÷c¦sVYî˙Ř{nǦ®}–˛•šúď˝&×^ ŃŠ@ĺ|*‹ˇßM†Mł=N˙lµ[`Yµrúżş˛é­˝· lňFţó&‹ÇŠj;Ł€Ş LÔ˙R°qÓ&ü_Ď›˙Ň! "˘e’¨;‰3…ę‘ę‘ďť˝‚ß\˝‰˝[*áXłZžwÇú5raĚÁË™ĽÓNdyţ đňśOgUö˛˘’ů¶Vµ‹L]Ł&Ôő?>yo]ÄŢÚĘéhď˝&v_ń>bÇÄ­;čűäşfęËlŹpŻ}Öąú9[˙pQAĹö,˙¶.óINŢžőČ"bHBDÄ€„h™†$÷;ë÷uu§?ű{mÇřĚŐ8ső†ÜÁ¤ÎnŃLhÜTÇšŐEmő;Ó¨­öĄŮÁť¸uÇpK\Ă€AMd§Ź}W°cýl­,—‹ĆÓë–<·c#Ž|”ÔVł9NżĹďůTă7n#yăß0‘˝#ß÷Öî­yÚhéü¨OŢř7ů÷jëęĽSŽ1$!"b@BD´Č!É÷ľ÷=<˙ü󨩩Y’eVw"kľ„3W 쌚ü†^ě`"{ݱ~ŤY"F4äűíţlG…ŚÎa1Đů¬Ë­•T•Żš—ňĐ©ď“kš-“é…rÍ®Qčqm÷vÜÉNÝĹ›C‹Łľ×|ł\f6ÔS¦f űh~0$!"*w±!˘’łëË_Ćü֟ʯŻ_żŽüŕ¸xńâ’-łĹ0ÓoŐ6Yďß×˝őA7WŕąŃöč:Óκظk¨GTĚ4B¤qóýÝw–D=ŞFŐťx ;e+±—Omß ßWm]Ť¶şuÓ[)[rC±˝°XóC=Ťf6ÇŐ©F÷Ä>M›†#fS…ÔëŐíÄ#<¶îáE«˙ŃTV>?MŞ-Śsź› ţCE4ʏ» QaQIÚőĺ/cđĘ˧wúČfłK:$Ł=Ô |ęU[WË-T“7n˵cÍ—°cýxkě†ôéNůýßćĎ´h§XXĽŹźłqs…˝`¶íbĽt1Ú¶GךľĎ[cG]ĄEÖ'Üśş ď#÷ľżŃjŢŃ-,:Űă őőÚ*Ó{ÁÔŢÚJĂó7n®(x-–…x–ŤBŞ*Ë*9r†ćCsőŹÖÍęĎłO?5ŻĺČd2Čd2%[˙oykYßgr|ś˙1 !"Zş6mŢŚ?ű‹çäŘŘ„ěě>·cSÎoŘë*-ŘżË!żVŻŻˇŢNőąÇ6ću•LÜš*hęÉ»#Wd§ţ冚śN¶·Ć.×θ5…cżťČąć[»·â­Ý[±×¤ż˛SwŃ7:=Z¦Ş|ţŞˇFłŚĄl%ön©ÂŢ-ÓeJ޸-Ăť‰ě íÝRoŤ='ŕxjű ţnrNÇ©ŰÁ(Ş˛¬Âţ]MÝë×yďě˙'ďőĺĆ4nŞŢ §Ň‚§¶ořB¶ŇŤ]LËęą±wK¶VZdŔ÷rCÍ’Ů˝†! 
-–h¤O~­çFFXËL&“ÁŹÁžŻ5ł2\„’Ü››}ëÖ-’ĽôŇKKnM’źśąŚçvlBUyžŞźîÜŽ¦˛Ú…Yďę5DĆ'o㽑+xŞ~Ş­«Ńń„Ů©»źĽŤjëjMgô'g.T–‰ěĽ7rmuëätqNuy&nMá'g.Ľ…đbĽ”cÍ—ŕ­±ŁÚşŐPcXöäŤŰxë×Iͱ?>s_uÁR¶mŹ®CŰŁë0šĘćlą;x9ÁK™9×7zMÖqÇNMh"Ęú›«źŁĘRÇšŐš]zDČň“3ż“ĚS۵HňĆm ^ž\Ôb=•?۱Ž5«±·¶ {kµďĎ+-\HÂ5Irmv8ĐÚÖVđűÇĽ\whpŻĽü2ÎeęťŢ0~xä+‚ C’ů4>yo~Š˝[ŞĐ¸©–˛•šýŕĺ N]üĚpŐÁKdďÜ…·Ć&w®Qű›«źăŘŘÄŚ[ŻęĎ9šĘĘFŐçĚNÝĹŕĺ ŽývbI…#2xřä~sős´=şŽ5«sĘű4­é ďÜď­­”ÓoôPěâýpçz\ěÓé2ě­­BUyYN€»ř/eŕ}ÄŽ¶şé‘!úť…Î\˝‰Źîŕ±uŁ®rzç˘ó©,FSYÄ.¦5#Zk!݉ěĽőëäô»ëjd§~ŹńÉŰňf@BÄd±9|g˙‹|@1$1 Iľő­o-©‹Ů©»čűäú>ą¦}PHÇV윢OŢ6 1^ł HJ<$ů/ŻĽĘ‹ôŔ»9uWîz$Fk…(bĘĎbí4´Ó‹Î:¬«1šĘĺc)[‰Ż×V@AŁ‘!ÉROb˙ ß446˘÷§ď漧7ĆŻ}đÂţýxĽˇ!g'ś×ż˙=fĽóî{š×~xäéN/őőőřţoät¤ĎŤŚŔ˙ĚÓ€ľţ~žyFłSÎČŮłxĺĐwe9zú.’ÉqĽzđ`Î5^˙ţ÷đÂţý†Ó2™ Ţxíűôő™Ö‘ŁşG~řCÓÎ~±2™ öżđm t˝ŠŠ Ľđü_`rr»÷ěÁŰGdzîy ďôöÂjµbč×ÍęzęşD=ëë1“ÉŕĐÁ8qâ¸áąź đĘ«‡rľ‰ôá‡GŽŕń†ĽrčöżđBÎn9oy Ż˝ńZŰľ‰‘‘ł9Ď0˝NĘ|¶ ""~X$ZR&˛wĽqŽ5«ĺBŻŁ©,VP0=ÍJ¬?’ťş›łÓĐB9sísl˝Üěßĺ@ěb·¦îB°¶|ön©”;ú‡?÷–ş=O>‰–Ö6D#}Do8  p?9+Ă‘mŰęeçřń†Lf&qîÜ|ÍZaE}}˝ćümľ˙#÷v¸Q/&{nd'OśŔČČĎ<ľhżéČCćtŚ[t‹ŇF#}ôőÁjµ˘ˇ±Űęëĺ5DHłm[=ö<ů¤ć¸Ŕ3OËňéG´Dúúp)™Dr|gžÁń—Ń ęk¶´¶ÁQíaŐô(Šńś:Ů˝çID#}8yâ2™Śi9˘‘hí-özÖ ëô¨ťdRŽ"yĽˇa:LQ-đ›G[‹O¶ŤşţN?sçFđN8ŚŃ×˙KĂň^ştI۶Őc÷“{4íöęÁ°Z+pč•P»÷ěÁ¶úz âá!$ÇÇqčŕAÓó3 !"â‡E~X¤Ţ[żNâĺĆGPU^f¸[ °ř; Ĺ>MñćKhÜT!G¸é˝¦ŮчřsoˇÝśÔL™‰čě Ż:„ˇˇA\J&qôí#ŘłgŐŐrtX­VĽ}ô¨<ćťwßĂĐŕ iđʡC9ÓezĂaŮ17Á12rţ§źF&“Á«Ż4˝2ýľÍń‘ľ_äŚôőáń†Ľ}ôGšđ@]ĆwzĂš€$Ň÷ YľŻŇCđťý/âőďďôö"“Éŕä‰ăhműćśÚjhpP^ł÷§ďćÔŮł?Ú|ÓˇĂ;˝aĽrč»ňűŃHźĽW}YŕÄńă2¬҉ăÇ‹ľ^}ývĽóî{xűČ[rý¨ xăµ×Éd`µZńÚojęö;ű_D¤ďxőŕAŚŚŚŕí#oŽŕIŽŹĂjµ˘Żż_Ó¦âXŘ˙·±m[=Ţ>zô~¶˛mFFF048h:]«T¬äŹ"˘ü7nÚt˙CJo/>ţřcV=˛SwŃń«Ţą‚ó©,’7nËPä|*‹÷F®ŕÍÁO}Ë{gŻŕČGI ^Îŕ|*‹ěÔ]d§îâü˝ťu:>¸wŃY"ZĽź{?˙ůĎqýúő’¸˙‘‘<űôS˙Ń«¨¨ŔëoĽ 2¬¦§kÎőkoĽ9ă:j™LGßžî\ď޳ǰs\_żŻÝ»îĐŕ éÇ4Ç›…ŻżńfÎČŠ†ĆFąĆ†>DŁ6;†Ăt°s˙şÉdrÎm%ÖFe3Ş“gý~<ŢĐ€mŞŃ8őő۱ůŢčŤţńt q?۶Őßź.so„ĎL×#3Š zÄ´šöż32G´Ół~?ŕ§÷B&#Ď9WkŰ7aµZĺׯ:”óü©G%“ă%˙ď G-sb'śĄdôŢVĂD´´ełYÜĽy“Q †ĆFĽ°?~xä†ńúkßÇ;÷Fâ´´¶v€ó972";ÄĎú¦ďŰóä“°Z­śśÄĐń(€Ý{fľöă ¦ζúz¨˝}ôGČd2y;×ó˝č¶m÷CWľŚŻĘą†5’$řxăµďcddÉńńśLĹup0—ëĺ HTAŹY¸$Ę"Fŕś1iß=¦í&B-łpGŹđęAÇ€„(χBőPcđűýřŁ?ú#V•ĚĎ˝ššš’=i´0ęl|g˙‹šµ#Dű•C‡ćÔžśa Puuµś&ý0čĎ<˛Á᨞Ő=WTT ˘B;záá!١WßÇ|PB‘ľéuS¦Gą<‰ÇĎ»Řčž={äz0‘HźfTÍIŐ"©ęőGg}˝Ľí{o´ŹŁşşŕé]fŘLeĐO #c HŠřČőG?÷h&o=Š=_k–_żrčЬFQLަS|çŰĎtĚŤÉÉY—[,<:ŃHN?ˇˇAÓi ó©÷Ýwńťo[.€Şž^䨮Ćî={đěłţś1ⵓ'N ‰h±8ëî={4íUQQ1ëëŇVÉńqĂi[3=4˙ńC"ńçîÍŁş©(?ííťŐâ—ę)Ku@&“Ńěđ"lv8ŕp8äú%m>߼^·ľ~;NÄpâřqśÔÚ.C-«Őš·ťô×;yℜRev˝|íűáжŐ×/Ůö-5ÜņčžË—.ńC"• †# ăĐÁrŰÖ1ąŔçˇW"9^Ü.!ęĹAÍv§FFÎ}ţąRŹşŘ˝gŢ>ú#446ć„ Q®‘‘ł9«ů^}ývřD~ůK|uzÍ±ŠžXcääÉ“îďjŁ^{¤ë}g˙‹üň—xa˙ţĽ×3"Ö…)dý±¦ -,$DD¸ŽÜşu ŔôV‡Ď?˙b8Jhdä,Ţxmş|V«µŕőMhvřÉŚŽ„#555¬śÔ¸©Ž5«qäŁ$˛SwY!DD Gľp ˇţŃş˘Žéýé»hhlÔŚ±Z­xíŤ75ďSoýűĆkßGCcě«w=9úöüÓɨ°VŕČŃŁ€öżH_&''ńú÷ż‡ńxc#ęëë148H¤ONaiim›ő–łłQQQÇđáĐNś8ŽWľ¬éŔGú~H_꬗ÉÉÉśhvɓض­çÎŤŕŐńáĐápLou|ňÄq6<ë7ßY¦Ą­ oĽöýűőg2˝f.×SO7Ú˙ p8xĽˇAîžóʡCđ?ó42™ üĎ<ŤgÔ××Ăj­ŔĐĐ ˘}}rDĘl¶Š&$DD Gľ Ç~;cc¦ŻďXż{·T±f5Ş­«±wKú>ඤďé|:‹cżť`ăĂ2Ąîd‹©5zŻ˝ů†ÜÉĺĐÁčë˙Ą|íYżďôö"“ÉäŚ"¨¨¨Ŕ‰^=ř2Nž81˝ŕë‰ă9çÖďÇ+‡ľ»č÷ţúoâŮgžĆĄd‘ľ>›Ľ}ô(Ţ ÷"éĂÇçÎÍK0óöŃŁyŻ[Hť´Ţ HÔ_Ď÷ő±m[=ÎťAr|Éńq(Šěż˙z_?^}ů ÎťÁ;Sm¬V+^9tŁG-Śł˙úŻřűźýĂ‘Evćę Ś¦n˘ă«.XĘVÂ[cDZßN,éQ$Ł©,FSY61Y¦Äô—Ůp8Čd2Řěpŕ…ýűaµVî€R_żG~xTîv"¶—€W}Űęë‘L&qnddúďăăr$ÂtýGr×”s##Čd2÷Fp4bĎž=†Ł$÷Ę%ţ>S¨×ŃĐkhh”ťzÍ5Ş«qr †Hß/048„drüŢőŞ5SŤž ř᨞.úŢ ą¶aÝß»îĐŕ ††5ÁR}}=ZÚfMŁł{ĎžĽ;ĎĚĺz˝ďľ‹H_.%Ç‘ÉLĘzP?‘_ţ'ŽÇąs#ňÜőőőŘě¨Fk[›aŮĚÚDMŹv™[ű3 !"Z¦>ú—Áß˙ügňk†#‹ü!}ę./gŕ­±Ş­«<€¨˛¬ÂDö΂ß[µu5Ć'o!ĺ´”­śuĐ´XőCD G–"1Őa1Îq‹XŁŽěĚŁęë·5…ĆQ]]PŮ yOCc#ótÄż™÷ĚĘ>×ú—ĺšEΕÉdä2-­m v˝ŠŠ řÂźŹýEŢ{…©®®F `8˛ŘÖďwä÷ďšţíĆĐď&1xÉx·ĆÍh¸·ę‘Ź’¦×¨«´ŕëµU¨Síśsćę üϱTQ!FľëYĘVÂűŤ›¬¨*żż#Mvę.FS7 şVµu5ľ^[‰şĘ‡äµ·î`đňdŢ©=–˛•Ř[[…­•T[WËëŽOŢĆ©‹źáĚŐ†×j­['ďĄqsžŞß Ź=66اi> D GJ‚z}łŕŠJ "*FáČK/˝„‡z•łČ,«VjÂu¨çóŚ(©Z]¦ =Ś4l˛˘qSîPÔë×`Çú5xoäŠiSčő,e+±—C†ú× ąVĂ&+v¬_“łŁOUů*ě­­BUyŢ;{%ç¸jëj<·cŁ&”×­«´ ®Ň‚ÁËô}rMSżâuQ"ŻUqW˘e…á‘––Lď($v×y¶€ŃTřIJĂ‘ĄĂR¶R^ĚeJŠ™ĆMČNÝĹ{gŻČ‘;ÖŻ‘Ű 
?UżÉÉŰsş¶·Ć.Ă‘ľŃkš‘ękµŐ­3 #Ę9rg®L—ł®ň!´=şUĺehÜTS?Ó”S3bJͱ± ^Ę ;uU–UŘ[[‰ĆMhÜT‰ě”颹Omß §;eďÜE]Ą±‹źń%Z&Žĺ:72˙3Okľ·ŮáŔłţ+‡0 ™7˙tâ8ţÉ`5i"š_ůęWń _ Ă‘\]Ą­uëä‰ŘĹ…›Îq䣤&X8ső&>şżjžNµ·¶ ?>syN÷—39ÓRÎ\˝śžŰ±QŽ&1šň’ťş‹Ž_%4Ł<Î\˝ěÔďĺtŁÇÖ=¬ąŹ˝[Şdýéďq"{ďť˝‚‰ěön©ÂŢ-Uüݤáú"–˛•řoCĺńÇĆř|šů,ťćĎxZtĺĺěüă?†Ĺba8B4OôëuX­VĽ}ôhŢĹY‰ Ń’óĎżúŐ¬’ţ×˙Âűżě—_3YXŤ›­Řj2ýE?MeâÖÔ‚mź;x9c8:d|ň6/gи©Ź­xN ›ŠiBúi.ę ăČGI\ż5eşęŕĺŚáőGSYd§îĘ)1"Ľ;˙Óá’Ůccđ>bż·NIĄá4ťß\ý|AFď,G©T 'ţńYô…xâß˙ű˘ŢĎp„(?±“жmőhhld8B HćŔăńŕ>ŔÄÄ+h úűźý>úőŻĺ× G^Uů*ÓŔ@-v1˝ Űű]žĚűšâSWůáČŽBŚOކcÍjÔUZđW 5ş<‰3×>ׄ!3íÎó›«ź›ľ–śĽť6©×;9źş•÷Üâřµ&íÁp$żuëÖ±č wóć͢ŢĎp„hfův"b@2555xýő×YD čĎ˙üĎguś>q»Ý GŘ„Éh śOgóލ/×oMô>Çš/áĚŐŮ]Łď“krŐjëjT[WŁíŃu¸ug®|ŽŃt6o2U–űaÇ×k+ŃTcËóŢéńEd yăßřŔć±víZ|÷»ßE<geТúřăŹ1::Zôq GćŽ -;úpä+_ů \ť|Q ^Ę. şň03Ťę(¸3roý¶G×iv˘©*_ď#vx±câÖô}r}ÖŁTôŞVß˙±]m|čéwČą_ţßóťAMM ·§/D± Ă"˘ůÁ€„–†#4u°0×Qb§ś÷pu•<¶ţaÔUZŕX3}ŤŞňUxnÇFüäĚďć-$~rćw 9áŃňYW-jűŘú‡óľ÷©íđÜŽŤđ>bçE´Lö1!"š_ HhY|@d8˛<µCĚŽjëjÓm„ŐĽ56åʲJî`3x93§r6n®@]ĄE¸čU–OÔLŢźcĆ'oăü˝PÇ[c×,ÚŞVWiA㦠ěXżUĺ,J´\ö1!"š_ HhY}@üÚ׾Ćpä&FtXĘVâ©í4!ÇcëĆţ]Ž‚ÎSUľ űw94Bµu5^ܵyúŮ™ş‹ľO®Í©¬bĘL㦠ěÝRĄ)«Ąl%ön©’#GÎ\™żÝlŢą"˙ţrC v¬_Ły˝qSžŰ±IŢgěâg|°–ůĎ>†#DDóżV""~@¤%#v1 ď#vXĘV˘qS7U ;uW†Ů©»č˝†¶şuyĎ3x9ĆMčx‰‰[ÓŁRŞĘWÉsĽwöŠfşĘlô}r ŐÖŐp¬YŤ˝µUŘ[[…ńÉŰČNÝŐL»I޸ŤŘĹôĽŐŃDöŢą‚¶şu°”­Äs;6—Ş­«5uuäŁä‚o©LDüŮGD´\0 !"~@¤ĄÓ¶SwńćĐEě­­”SaD‡˙7W?Ç/FŻamSFŢ;{É˙†˝µU2Qźc>BěÔ]Ľőë$Ľ5vę¨×™¸5µ`Ű^Ę`4•Ĺ7ëÖɵHÔˇĚŕĺ ŽŤĄŽńg ń"ÍÉh*‹Ožź·óMdďཱིWäŤěÔ]ŚOŢÖĽnt=}9bź¦ű4Ť*Ë*¬-/›qAÖ·>J~˙ŘŘ„iČ‘ťş+_×ßW—y6őeVu=üřĚô˙âÚů®»mEDüŮGD´ś0 !˘F:•Â;á~@,•ÁÔÝyŮĆv"{gQFR,Öu–Úµ‰h˙-d8BD´hŃă­ÎżÁ­[·ř‘JB6{“áŃ"b@BD ŽX,|ë[ßâD""ZÖţůWżŇ|Íp„ha1 !˘ŠĹbÁK/˝„ššV• †#DD o%«€ G¨1!"Z HhI«ŞŞŔp„JĂ"˘ĹĂ)6D´¤ýĺ_ţ%>řŕ<ńÄX»v-+„–µ'žxďż˙>†#DD‹Ť -ik×®Ĺ7ľń V•ĚĎ˝×^{ 7oŢä¨I"˘EĆ€„h Y»v-GM}¸ •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •Ľ2VŃüř}âCą6Ί z@Ü˝z‘•@DDD"˘ůęl%ţwY DDDDD$N±!"šššVŃ2°víZVQ‰ă"˘9řĆ7ľuëÖáćÍ›¬ ˘ÔC=ŹÇĂŠ ""*q Hćرڽ{7+‚čÇ)6DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%ŻŚU@DDDDD…ş}ű6Ć/^dEѲÀ„ víęUüŹźý+‚–N±!""""˘Ľzč!V=p,KQďç"""""Ę«¦¦_ůĘWpíÚ5V=0vîÜYÔűW(Š˘°Ú¨”qŠ •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%Ź •<$DDDDDDDTňQÉc@BDDDDDDD%ŻŚU@DDDDDTD" .ŔétÂĺr}!eH§Ó8}ú4 ©©‰Ť˛Äë“íőŕŕ"""""˘Ax˝^DŁQÍ÷‰š››Ą ±X ^ݎPhŮ×÷áÇ‹Ĺćő|ú¶‹Çăđz˝hoo_ňç§…Ĺ$DDDDDD#<Źü^4Ekk뢍Çă9eXŽjkk‘H$066¶ çĚ\ësˇĎO o…˘( «hv‚Á ş»» …ĐŃŃÁ ™±X ÍÍͰŮlH§Ós>_<ÇÎť;çí|‹}~ZśbCDDDDD4ÇÎ1Ŕ Q§^Ż÷h#>˧ŘѢI$číí„B!Äăqtww#‘HŔn·Ł˝˝]vЉ>ŚD"!;Ëfënó& $ ¸\.x˝^řý~Ă÷‡Ăa\¸pˇPét˝˝˝rí—Ë…P(”łk,Ă©S§ĐÔÔŻ×+ĎqęÔ)@?Nź> żßŻ9V§žjáóů ;˙étÝÝÝp:ťFŁčîî–őĺrąĐŰŰ ›Í†`0s?ííí°ŰíčííE8Î{?ęňuww#ťNĂăńŔëőÂçóÉ:e™Źˇ··W† FmŹÇŃßß/ëęłĎ>ĂáÇeťëŰ[˝>‰XçĂn·›ľoĹŠšóéźGý3ŐÝÝť·Ľs9?y¬xĆ[ZZŕ÷ű5÷PLyhŽ"""""˘E‰DJSS“ŇÓÓŁČů300 ôôô(v»=çµöööśsęßëv»ĺß˝^Ż’JĄrŽ Řl6exxXqą\9×±Űí9Ç555)”žžEQĂăô]¬`0(żďt:›Í&ż¦őÓŢŢ®„B!Íy‡‡‡eť555™ŢŹÇă)č~R©”ŇŇŇbx===Jgg§@ …Bsnwu[ëëÁăńČ÷éďY]Łsݧú˝3ťOý<.DyÍÎźJĄ ŰIÜĂŘŘجĘCsĂ€„ŤčPş\.Ĺfłi:ł"€ÇP($;ő˘łn·Ű5çßןkllL%~ż_sĚŔŔ€,ÝnW|>źě¦R)ĹétćtĘE‘!ĚđđpÎőő`EQŻ×+›ÍőE7‰Öʍ‹P($#EQ”öööśĐbxx8ç~DŐ÷ÓŮŮiZ>őýűý~M;čËX¬±±1M€Ł.·(›>„ť}¨#}{§R)Y7úgD}>}đ ę[ĽĄR)Ĺn·+6›MÓn…”·Đó‹pM˙ě‰v÷z˝s*1 !""""˘%NtőťeugҨS.B ő ŃqÔ‡ꎹ"ÔWju,}>_Îk˘“Ż!"ÂýyDGŢívŽ`#ôżýőcvOâuuý¨GŤ°1*ٍkłň©Gáč;ýĹmçóůr^3a!Ţďt:sŢ/‚}xĄ*Ôu—ď|úQý)đJÝIDATAę÷…^˘Ľęv+öü"Č1:*•ĘyΊ-Íi%""""˘E#Öë9 Zе|>ZZZrÖć ŮJ7 "ťNĂď÷.Žérąä÷Ĺęë8ťNĂ]gĵÔçÇč·ňŰţę×9pŕ ««Ëp= qâĽúóµ··Ţ“¨?őkbý łű÷®^D¬k2SůśN§éÚ%łi{±Î‹zÝtuuĺÜŹŃýŹŤŤaxxxĆ5QÔeη€Ş¨OŁő`ň•·§§gVçkĚ0l+»ÝŽP(„Y—‡fŹ‹´ѢP‡ęFőá€QçרÚßßoz.˝X,&;©â:fljN­~ˇUŁN°QąÄ˘›N§Ótu(‘N§a·Ű‘H$d8cT6u˘.›ú~ŚÂńş(c8F:ťÎ[>a>veńz˝p»Ý8}ú4Z[[árąĐŃŃ!#Ő—a¦l\.Nť:…x<.ë+ťNËú±Ůlšz©íl6›¦>‹-o1çGľş×'Ĺ–‡fŹ#HhQ’¦¦¦śŽ|:ťÖěVcNh:ťF:ť†Íf+¨Ż>§(‡~”Š>Ä1A˘>Ź:°Đďś’Ż/ĘŻKÄ5Ün·á¨ ł‘ 3ÝŹ~DL1囯mkc±Bˇl6‰jkksFDąŽşLűöíCee%Ľ^/‚Á :::ĐŃŃH$‚T*exśŮ(ź|#?fSŢBÎŻŢ­fˇęŹŃä 
@ÜnwAŁ ňunŐj1DśÓl†ľŚú©4ů¦¶čË`4ĄĹě~ťNgÎ÷Ě:ĎfŁUŠ˝~ä+_ľ©'ła·ŰŃŃŃD"ˇéč·¶¶j:ůů‚˛ÖÖV„Ăa8ťNtvvb``cccP‰DBÖ‹ú¸|çË÷<ÎGyó…jFĎř|Ô1 !""""˘€QČŻóŻ~ÍlDĂgź}fz=őtq\ľő-Ě:µfS1Ě:ŘęőNf*›: ™ij‰ŃH…™‚"Ł×Eç\=ŠE-‹@˘»»‡6ěčűý~÷×kQ—×ívçÜG,ÍfC<G0„×ë•íˇĂŚęG>łş™ŻňŇ6F>ŚĂ‡Ëc‹-1 !""""˘ŔLSf OÔŁ ÔÓRŚ:úétZv,ŐkšĚÔi5 qĚŽ1› ":íbŤŁ{ííí ]k$_€”N§ ËQh@˘ ň•/ťNË·Ůhžbi0úĐČn·Ë{WżVČZ/FeëŞčŹÍ÷\Ő÷|•×ěüůFäÄb1ttt łłS¶Q±ĺ!$DDDDD´ÄÍ4Ä,l0ëä{<9=EývŃÉommE"‘€ŰíÖ,zi¶^DľŔA”Ýl¤ţ~D ŹÇs¦?Äăq´¶¶BˇŚp8\p»‹‘ú6R׫:ř×ÖשřúôéÓ9X4ŐŚ˘P׏ŮůĚžÇBĘ«-RěůEťvwwkîCL—ˇ¸‡Ů”GŚBˇYŕNÇDDDDD´Đ:;;ŠĎçËymllL uOšššJ$Ń|xxXçńx”ŽŽ% *v»] ¸Ýn%•JÉ÷§R©Ľ×P(6›Mó}ŹÇ“s}őą<Źâőz•ááaůz(’ŻĄŁŁC ň{~ż_sŤžžĂďëϧ~]]ő}ęëG?Š˘(‘HD @qą\˛ÎÜn·@éěěÔ#ŢÓŢŢ^p»µ‘ş.l6›¦ŢD[ŰívĹëő*===ň5Q.—Ë%Ďăőzĺs%ŽSS_}>łçqxxX±Ůl—·Řó«ďQ܇ú™Ő·±ĺωQ›ÓĚŃ‚óűý % ™vÖ›ššŚ;-÷:Řccc†pŃIśN§ …rB€¸Ýî˘Bł˘łłSv^ŤB—H$˘8ťNMŮššš4ť~}ýčC }G\ýş¸§Ó™÷~ĚęuxxXńűýŠŰíVšššäąEYŐoucTţ|rÚHÔłú˘Lę:S_kllL† ęú0 ŮĚΗďyŻĎ¶ĽůÎźJĄ”öööśgÖ¬^‹)ʍŁ`†f¶âŢ˙DDDDDD´x<—Ë5çu3ć›Řédľ¶Ě]H±X ÍÍͰŮl9SY‰jkk‘JĄf]ljDétzÎu‹Ĺćm‡ťĹ(oľóŰíö‚ëłň´´´ ĄĄEłö† Q‰Ĺbhmm…ÇăÁŔŔ@Îë@˝˝˝đűý9kŤ8p©Tި5HhqĹăqx˝^ĽPq•R'pĹ @gg§fťîînůőŘŘfqŃ`0X,†X,ĆŽ÷‹Ĺ‡etͲü Qéčîî–_ŰívÍtšžžžśé‹5Ą…fŻŘé:”‹ Q‰‰Ĺb‡ĂH$H$pą\đx<†Ű0•$DDDDDDDTňV˛ ¨Ô1 !""""""˘’Ç€„J"""""""*y H¨ä1 !""""˘’vřđaTVVbĹŠX±bĽ^/+…‰DÍÍÍhnnÎy-ťN#Źł’ Ä€„JV,CGGŇé4śN'l6ěv;+†¨g8‹AQÍ÷{{{Q[[‹t:ÍJ*P«€JU8řý~ůw˘‰ËĺB(ŇŚ|J§ÓŹÇĂJ*"""""*Y§Oź´´´°2čäőzs¦…‰i5N§“#˘ŠŔ€„ľP§Nť¸Ýî‚:sńxź}öl6[QżO§Ó8}ú4śN'\.—¦#™oÝq455Íézf÷bôńZ!×M$¸pá‚ě›]o¦ű+¤ Ô×*¤>ŠąŹůz– }6Äű‹-›¨BëZ]gĹ<·ĹŢŹ‹Ĺf|®É€BDDDDD´HŠĎçS"‘b·ŰňOGG‡é±‘HDqą\š÷ŰívĄ««ËđýˇPH D"Ą§§'çZFň^/_ó]ollLľŢŢŢ® çś; )Š˘(ĂĂĂŠ×ëŐĽćńx”T*eX'ú÷P\.—‰DrŢ?<<¬Pššš”T*ĄÁśúT×A!őoVĹÖßlĄR)ĄŁŁ#§}].—ὤR)%¶04¬gqĂĂĂJKKKNŰŚŤŤ–ͨ-EŮÂá°é=™ÝŹľME{Úl6EQĄ§§ÇđľBˇ266&ż6#žŃ¦¦¦’ü÷‰ -Ńóx< Ĺď÷+ˇPHiooĎ ÔÔÚöövĄ§§G …BŠÓé4=¦©©I tvv*§Ó©455)MMMJ(’Ż»Ýn% iÎ!‚›Í¦´··+‘HD …BŠÍfS(---_Ďď÷k^oooWěv»,‡ßď—÷&B#§Ó)Ë$®ŮŮŮ©ąžş3ěóůäűőçSekooW<Źât:•öövM}ŘívĂ»8§Q›é˦^ŚÚ«˝˝}ŢÂń,Ůl6Yę{Q‡ę÷«ë¸˝˝]Öł×ëÍą†8—¨3qś¸—Ë•S6uH&ÚZß>úzKĄR2P1şŇčźh Č˙ÇÔm%Žß7 R©”,ŻYHĆ€„hžř|>ŮIëéé1ěĽë;č‘HDvŐťCŃ©[ýońŐŁôQuYôáŠúzúsŞŻ§DçŇězęßčëŹu»Ý¦‘čđú|>ĂάľŐÇčG¨;ç~ż?§ŁlÔ Ł~Śę_tĐŐm6Űöš q?n·;ç^DűŞĂń~u] ęęs‰ű7:N}?ęv©}Äł®o:廸©ďGýĚä)"‚ŁD\[}ţRĂm~‰hŃ5ÚŰŰĺ.B0”kbµAŇé4öíŰFŁ9ë0Řívyžh4ŞY÷BđűýňÜjbÝ ýîçÓŻ/ˇľžXçA\Ol§jt=uy:;;s…ktř|>tttÖťúމÚŰŰá÷űsę1ßÚâžťN'şşşL×Q_ÔGGGNý‹k»Ýîś÷wuu¶—¨u{ÍF"‘@ooŻ<—ţ^l6›ć{.— >ź]]]9çS·u"‘ČYĎĂfłĺět¤~Ômźáýx˝Ţśű×4*ŻŃőÍę^„‚Á`°¨~Í‚č¸555™vÂÄoÄ1ĘŕÂ… X±bEŃ×ňz˝†ťZłß˛˛«Mľóµ´´ä}ÝčĽęđĨ^Dťč;Ľ˝˝˝FŁH$šÎąšúőŁmŤŐíŁŻłzÔ›m{͆ŠŮ©EŚ”ÇăšQ?júgB'Ĺn‹ĹĐŰŰ+ŻeD]öbwžď×oĺ›ď<^݇Ö< ±X Ńh6›Í4¸d@BDDDDD´ÉL[ęŞCńu!ŰĎŞĂu`‘Żs©ÄoÖóm©j4˘c¦ëĺ{=_‡V=ęF]¦}űöÉN§MMMňxŻ×‹ÖÖV¤ÓiĂ‘>ź/o ŤŻý iŻb¶­5 ;ôíO8–ÓĶą.—Kţ‰FŁčďďĎ™ĘdT˙FĺP·_ľöńx<Ř·o_Nű ¦Đz1kł@M]WbJ>|ňNąb@BDDDDD4ŹÔżµ6ëŔŠĄč܉NcWWWQ#ÄqfÇ$ęifŚF.Ět˝|ť_ł˛¨_SŹę‡Ă‡Ăp:ť†ë˛¨GFŤPČ7őCŚ>°2 DÔŻ‰÷Ű^sQČuÔkËtvvޔӒŚÖóČZçAĽG´ŹÍfC4Í)ź:t1*»Y࣯kŁöś)ĐQOťeŤĹbp»Ý¦S˘J i%""""˘E1Sř ÜTwÔD0`¶^E8Fssłfm ŃqĚ7ŠÁhqK9‹zúŃ ęé:F×S/ňYĚłş’‚Á á1âuu¨’ďžőíŁ>§Ńú.ę{mm-V¬X!ŰÇh=µh4šÓ^seôl„Ăa¬X±B¤Ói¸ÝnĂpD= ĆhŤŮý5Wl6›|D{AĂşVFĎ‹Q]GŁQTVVjžź| ´ęŰ^MĽ–N§5ŁG -uÓ((żÉÖŻ :«"<ŃźóŔĹbEýÖ?ßoŮE'Wż[‰čPŠ)ˇPHvng32è,ůÖ')t4†zVuyĚĄĚBqŁâŔH§Óš5eÄűŤ!ŤÇăŘ·ob±ŘĽ,*Ö ŃŹLRwüE#ęŘl]Ń®úzÓďT¤żŽzqÓbŰG_"¸0şŽŃýµg!#^Äu[ZZH$4ÓJwb'""""˘…ÖÓÓŁPśN§błŮ”––ellLS::: %‰hŽSl6›@ Ęđđ°266¦tuu)v»] ´··kŽńűý Ą§§Ç°,‘HD 455ĺĽ6<<,ËŇŃѡ¤R)EQ%Ť*ŹG ¸Ý'^…B¦eŃźSe“ßëěěT(.—KPEQR©”¦Nô×Č{qNźĎgZ˙]]]ňZÁ`P Řl6exxxĆö ‡Ă˛l~ż_sŤT*eÚţ…nllLó܉çѬ=›ššJKK‹‹Ĺ4m#„B!ÍýŞź­RÇ€„\{{»ěŠŽ úŹÍfËhN·ţŁĐAt(Ť:‡ę˘>XQwpŤ®'ŽťÔBŻ'^Ww–őőbTÖŘl¶ś×Ün·aůBˇáý‰ďéÉBBłúp:ť†÷\l{‰Îţl:ë˘ţôšššrÚI„@F: Q×*•2¬sźĎ—sEQźĎgúüäk‹BďÇěúđc¦ú6ű Tq‘V"""""Zpęˇ˙---F,“ÓcZZZL§]´´´ Ź#Ť"ŹĂn·Ăĺr™ăóůŕóůL§¸\.„B!Ói@^ŻW^Ofu˝|Ż{<Ó˛Řív„B!ĂkĆăq„ĂaÄăq$ Mí˛3Ó={˝^yŹfő‡‘H$`·ŰáńxLőS7DůÔínÖĆ~żVSoşţ˙öîđ8m¦ŤÂđÉ×r’+©ąp@&€+U¨ä +@®ŔKl*Đű#ßj¬c6÷5ă™±¬$“:óěł uon·«$IĽ×éz‚äy^ďšďÝ_FÔ<÷ ęíp‹˘P ­÷3Ď󝿏»_Qí|§Ţz=mß›Ůl¦(Šękńíšä–u:ťÚ 
ć+řVUUĹmpŇŹ˙÷~xyyů'ý'đµŚF#ApVÍB'“‰Ň4Őt:ýRAÂÍÍŤŠ˘hÝÍç’QAŕ¤ţ´‹ .[š¦;»ť‹c[3VÍm} G8©×ě¬Ëd­ŐjµŞ—­đ˝=Íu¤i*k­ň<—ĶľmH|Č&[‰b_őCű9ůJUO®Ś3źĎůżŘ‚$NŞ,KYkëf—ŔąłÖÖ ?{‰1¦ˇŽ5Ę €ţÇ-—Ž€\<pńHŔĹc›_źÂÓÓ“$)ŽăOłŽ1F›ÍFa˛{p㍠pöŚ1J’DI’|Ş­‚gł™’$Ńb±ŕŹś9gŻ,KIż«G>“ççgIR’$ü3ÇgĎ$Ýn÷Sť÷z˝ćŹ|T8{EQH˘ŔéPAŕĂc”e™Â0Ôh4Rš¦Ęó\Ňďęétęí1â–Şř*HŠ˘P–e2ĆH’˘(R’$‡ďµÖ*MÓz~cŚŇ4­+T’$Ńt:=·X,´Ůltww§ ”eYÝW$Š"M§Ó&¬EQčééI˝^Żv^3ż›ĂwŹőĽîŁ(Şçę÷űź®Ę8|ů|^IކĂaŐív+I;?ATëőzgĚz˝®Ź7m·[ďg¸źn·[m·Űť1Ë岞>źWAŚ ç펭×ëÖóŢź«ßďW’Şź?ÖŻ­V«7Íż^Ż«(ŠĽó.—ËŞ×ëU’ŞŐjĹ— x#–Řř0®R"Ë2UUĄ——UUĄív«8Že­ŐŹ?Ľcz˝ŢÎë···*ËRqk˝^«ŞŞú3ă8VY–šÍfŢĎz~~Öd2Ńt:Őv»UUUZ.—’¤<Ďe­=E‘nnn†a=ßv»U†˛ÖěTă«zqK…Üüwwwőü«ŐĘ;ż1F7772Ć켽^+ CŤÇcšÁ˙ €óôô$I ĂPEQÔËR‚ ¨—ÚEá (ö†˘(ÔétTĹα(Šę`ÄŤÝ(ʲTžçšL&őr–Á`p0góßĆ ‡Cĺy^ĎA=n?ÔpK~šˇ…ű,7˙l6«ç÷˝O’Ć㱬µ‡zxx¨ßßívë0ĹZűévřÎ  €ăü›úNE Ăđ đ$I’¨Ş*•eŮÚŻĂÇUZL§Ój‹ć_X†áAEJóxłI۶Ä. ‡G«=ܵ6 ‡‡‡÷EQTĎAőđ>4iđ!\őF†;ŐMWWWÚl6;ŻąPÁ¸ĆĄűŠo×cL‚ŚFŁĎj!ľ×GŁŃ«Č6›®úćźL&GÇ-Ű Ţą%íT”x;´˝g˙÷N§sPˇq{{[/cijö*ńő˙ăř`Ç™ćńýósŤ/Ôi†ľą|ˇI†­»ńěŹq××(IŇŻ_żĆř{$>„o)JS3ěh.1ŮřwMK]_W]ŃÜJ÷ęęę`Üźš¶^'Ňď€Ć@´5=VUŇvřćwáL[ő1Ć;Ŕß# đ!ÜĂ~×cŁßď×€/ÔÍfu8˛żsڤşŮk†Ţľ m‰;żż UÚś×V•4ův˘éőzGď›»ţý€ŔߣI+€“łÖÖ"ľŞÖZeY&i·?‡ŻĂ·;LSš¦c$ŇüLw^ľ­mˇĆ±Ş“×T•řîOsţý^$űs»kĄA+đ~$N®Ů[$˲ťß­µő’™~ż˙Ç€ÂU…¸ ˘ů9ăńŘ;¦­—Éţńý]g|UMľ˛ľŞ“˛,˝U%šß…EY–í„$EQÔ÷¬í3ĄßaŃýýýÁ˝r‹Ĺ»Ž_ Klśś{Ŕ‡˛Öęű÷ďőN2yžËZ«8ŽBép©Ěd2Q–eʲL›ÍFI’ČZ«<ϵÝn˝ËR^[ Ň 5¬µGÇk »?¦­WÉţµîĎ“$‰†Ăˇ˛,Óx<Öýý˝¬µő#WuÓv].`™ĎçŢăăńřUÇţüI• ľ<*Hś\34X,ę÷űZ.—Z,ŞŞJwww*Šb§i[8Ńívµ\.†ˇŠ˘Đl6Ó|>WŻ×“1¦~oV©´íPs, hî:ăk’Úv~Çz™´Çú“, -—Kőz=u:őz=-—ËşŮk[ULłęăŘ®9mÇ›÷ʰ¸ßŞŞŞ¸ NéúúZĆ­V«Ą'ďyřv˝;.ń~2™(MÓÖfµţ$NĘÓÚXő˝ÁF_6™ÍfşľľŢiZŰôřř轧ކ€ŔIµ5 ĹqQÉămj{{{+cŚâ8®{ąx'ő§ţđŤFŠă¸nj{uuĄoßľéęęJyž+ C–Ö˙»Ř8)k­z˝^ÝTŻW–Ą‹E˝ÓŹ1FQi0h4y›Çx𴀋ÇpńHŔĹ# Ź€\<pńţ·MÎb¬*IEND®B`‚ceilometer-10.0.0/doc/source/contributor/3-Pipeline.png0000666000175100017510000013312613236733243023014 0ustar zuulzuul00000000000000‰PNG  IHDR R`CObKGD˙˙˙ ˝§“ pHYs  šśtIMEß  2ÇJ IDATxÚěÝ|Tőťďń·&f2a†ü$™! “t –° n ( ]AAş’ŞX…îcmq˝ZVí]ő¶BďăÖßm÷b•âţ(X~x]°pďâš T ů&$ĚHHläţ1ĚÉL2 äÇLÎëůxôŃ33çĚ9ó9“€o>ßď÷˛sçÎťÓşśćFH!!`rŃ=˝ŘĐĐ ††ŞD0‡Ă!‡ĂA!@·ş ÷ď߯M›6Q!`Xľ|ą¦OźN!@HÝ7¦>řű=čItovş˘µU1mmT m11úÚbˇŕ‚z&54ĘY[Kµ€RëtŞ>ŐI!Ŕ±ş1`r„„€É&GH!!`r„„€É&GH!!`r„„€É&GH!!`r„„€É&GH!!`rŃůćîyó”Q4G’Tµoź>ţÝÖ^7ç$IÇvíVĹŽý~]»]­OĐsIn·ň~¸J’tđ—Ď©±˘˘Ë1±rĄÜóć=˙ć=+z’ÔtěZ=ŮśNŤ=Z‰îLă8˙>’T_Vň¸PüűĄ|+Wm^Ż?­čq?›Ó©ÄŚ %ŤďVë>( 9ü4ÉíÖ#FŹG$'źÓżżźŻżúŞÇů!“Ün%çćĂ©«Jöő¸ż˙<ź)đţ4~Zˇ“‡]đóööľ'Ťw×ć­«W}YYŻö°9ťJÎÉ‘-%9č¸ŔűŘÓĐ^›Ó)WÁtăÜÝÝ‹Î÷Ä˙=ëm]üÇůďç#F„¬q_B´đdi©>ŢjÓŐ‹n˝äaÇ®ÂBĺ­Z%›3ĄËkŢÚ:xć™ !Á‰™™úÎů“ýćüĎ_Hň…—‡6n Úç­V«®´TEE]ć0ô'ů†‡:®łĽU«”1·¨Ë0ŕVŹGĺoĽˇ˛Ť›şă_mąób)’”]ě;öżž!hő缿ýˇ$I’{Ţ\c5f˙h˙>őee!‡§äć*íÚ.µÍ..–·¶Nź{NU%%]Žó×ĺ­VK’ţňÉ'B~ŢÎ×Ü[»]oýkeÝzk·Ă©ëKKőÎc? 
``/var/log/ceilometer/ceilometer-polling.log``. Then, ``kill -USR1 8675`` will
trigger the Guru Meditation report to be printed to
``/var/log/ceilometer/ceilometer-polling.log``.

Structure of a GMR
------------------

The *GMR* is designed to be extensible; any particular executable may add
its own sections. However, the base *GMR* consists of several sections:

Package
  Shows information about the package to which this process belongs,
  including version information

Threads
  Shows stack traces and thread ids for each of the threads within this
  process

Green Threads
  Shows stack traces for each of the green threads within this process
  (green threads don't have thread ids)

Configuration
  Lists all the configuration options currently accessible via the CONF
  object for the current process

Adding Support for GMRs to New Executables
------------------------------------------

Adding support for a *GMR* to a given executable is fairly easy.

First import the module (currently residing in oslo-incubator), as well as
the Ceilometer version module:

.. code-block:: python

   from oslo_reports import guru_meditation_report as gmr
   from ceilometer import version

Then, register any additional sections (optional):

.. code-block:: python

   TextGuruMeditation.register_section('Some Special Section',
                                       some_section_generator)

Finally (under main), before running the "main loop" of the executable
(usually ``service.server(server)`` or something similar), register the
*GMR* hook:

.. code-block:: python

   TextGuruMeditation.setup_autorun(version)

Extending the GMR
-----------------

As mentioned above, additional sections can be added to the GMR for a
particular executable.
For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ceilometer-10.0.0/setup.cfg0000666000175100017510000004174413236733440015606 0ustar zuulzuul00000000000000[metadata] name = ceilometer url = http://launchpad.net/ceilometer summary = OpenStack Telemetry description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = https://docs.openstack.org/ceilometer/latest/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Topic :: System :: Monitoring [global] setup-hooks = pbr.hooks.setup_hook [files] packages = ceilometer data_files = etc/ceilometer = etc/ceilometer/* [extras] gnocchi = oslo.cache>=1.5.0 # Apache-2.0 gnocchiclient>=7.0.0 # Apache-2.0 zaqar = python-zaqarclient>=1.0.0 # Apache-2.0 [entry_points] ceilometer.notification.pipeline = meter = ceilometer.pipeline.sample:SamplePipelineManager event = ceilometer.pipeline.event:EventPipelineManager ceilometer.sample.endpoint = http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification _sample = ceilometer.telemetry.notifications:TelemetryIpc meter = ceilometer.meter.notifications:ProcessMeterNotifications ceilometer.discover.compute = local_instances = ceilometer.compute.discovery:InstanceDiscovery ceilometer.discover.central = endpoint = ceilometer.polling.discovery.endpoint:EndpointDiscovery tenant = ceilometer.polling.discovery.tenant:TenantDiscovery lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery lb_members = ceilometer.network.services.discovery:LBMembersDiscovery lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery images = ceilometer.image.discovery:ImagesDiscovery volumes = ceilometer.volume.discovery:VolumeDiscovery volume_snapshots = ceilometer.volume.discovery:VolumeSnapshotsDiscovery volume_backups = ceilometer.volume.discovery:VolumeBackupsDiscovery ceilometer.discover.ipmi = local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery ceilometer.poll.compute = disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster 
disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster disk.device.read.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskReadLatencyPollster disk.device.write.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskWriteLatencyPollster disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster cpu = ceilometer.compute.pollsters.instance_stats:CPUPollster cpu_util = ceilometer.compute.pollsters.instance_stats:CPUUtilPollster cpu_l3_cache = ceilometer.compute.pollsters.instance_stats:CPUL3CachePollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster network.incoming.packets.drop = ceilometer.compute.pollsters.net:IncomingDropPollster network.outgoing.packets.drop = ceilometer.compute.pollsters.net:OutgoingDropPollster network.incoming.packets.error = ceilometer.compute.pollsters.net:IncomingErrorsPollster network.outgoing.packets.error = ceilometer.compute.pollsters.net:OutgoingErrorsPollster memory.usage = ceilometer.compute.pollsters.instance_stats:MemoryUsagePollster memory.resident = ceilometer.compute.pollsters.instance_stats:MemoryResidentPollster memory.swap.in = ceilometer.compute.pollsters.instance_stats:MemorySwapInPollster memory.swap.out = ceilometer.compute.pollsters.instance_stats:MemorySwapOutPollster memory.bandwidth.total = ceilometer.compute.pollsters.instance_stats:MemoryBandwidthTotalPollster memory.bandwidth.local = ceilometer.compute.pollsters.instance_stats:MemoryBandwidthLocalPollster disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster disk.device.capacity = 
ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster perf.cpu.cycles = ceilometer.compute.pollsters.instance_stats:PerfCPUCyclesPollster perf.instructions = ceilometer.compute.pollsters.instance_stats:PerfInstructionsPollster perf.cache.references = ceilometer.compute.pollsters.instance_stats:PerfCacheReferencesPollster perf.cache.misses = ceilometer.compute.pollsters.instance_stats:PerfCacheMissesPollster ceilometer.poll.ipmi = hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster ceilometer.poll.central = ip.floating = ceilometer.network.floatingip:FloatingIPPollster image.size = ceilometer.image.glance:ImageSizePollster port = ceilometer.network.statistics.port_v2:PortPollster port.uptime = ceilometer.network.statistics.port_v2:PortPollsterUptime port.receive.packets = ceilometer.network.statistics.port_v2:PortPollsterReceivePackets port.transmit.packets = ceilometer.network.statistics.port_v2:PortPollsterTransmitPackets port.receive.bytes = ceilometer.network.statistics.port_v2:PortPollsterReceiveBytes port.transmit.bytes = ceilometer.network.statistics.port_v2:PortPollsterTransmitBytes port.receive.drops = ceilometer.network.statistics.port_v2:PortPollsterReceiveDrops port.receive.errors = ceilometer.network.statistics.port_v2:PortPollsterReceiveErrors # rgw.* are incorrect and deprecated to be removed in Rocky rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster rgw.usage = ceilometer.objectstore.rgw:UsagePollster radosgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster radosgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster radosgw.objects = ceilometer.objectstore.rgw:ObjectsPollster radosgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster radosgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster radosgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.size = 
ceilometer.objectstore.swift:ObjectsSizePollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster switch.port = ceilometer.network.statistics.port:PortPollster switch.port.uptime = ceilometer.network.statistics.port:PortPollsterUptime switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount switch.table = ceilometer.network.statistics.table:TablePollster switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets switch = ceilometer.network.statistics.switch:SWPollster switch.ports = ceilometer.network.statistics.switch:SwitchPollsterPorts switch.flow = ceilometer.network.statistics.flow:FlowPollster switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster volume.size = ceilometer.volume.cinder:VolumeSizePollster 
volume.snapshot.size = ceilometer.volume.cinder:VolumeSnapshotSize volume.backup.size = ceilometer.volume.cinder:VolumeBackupSize ceilometer.builder.poll.central = hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster ceilometer.compute.virt = libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector ceilometer.hardware.inspectors = snmp = ceilometer.hardware.inspector.snmp:SNMPInspector ceilometer.transformer = accumulator = ceilometer.transformer.accumulator:TransformerAccumulator delta = ceilometer.transformer.conversions:DeltaTransformer unit_conversion = ceilometer.transformer.conversions:ScalingTransformer rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer aggregator = ceilometer.transformer.conversions:AggregatorTransformer arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer ceilometer.sample.publisher = test = ceilometer.publisher.test:TestPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher udp = ceilometer.publisher.udp:UDPPublisher file = ceilometer.publisher.file:FilePublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher ceilometer.event.publisher = test = ceilometer.publisher.test:TestPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher ceilometer.event.trait_plugin = split = ceilometer.event.trait_plugins:SplitterTraitPlugin bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin console_scripts = ceilometer-polling = ceilometer.cmd.polling:main ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-upgrade = ceilometer.cmd.storage:upgrade ceilometer-rootwrap = oslo_rootwrap.cmd:main network.statistics.drivers = opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver oslo.config.opts = ceilometer = ceilometer.opts:list_opts ceilometer-auth = ceilometer.opts:list_keystoneauth_opts [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [pbr] warnerrors = true autodoc_index_modules = true [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = ceilometer/locale/ceilometer.pot [compile_catalog] directory = ceilometer/locale domain = ceilometer [update_catalog] domain = ceilometer output_dir = ceilometer/locale input_file = ceilometer/locale/ceilometer.pot [egg_info] tag_build = tag_date = 0 ceilometer-10.0.0/tools/0000775000175100017510000000000013236733440015111 5ustar zuulzuul00000000000000ceilometer-10.0.0/tools/send_test_data.py0000777000175100017510000001110513236733243020450 0ustar zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for sending test data for Ceilometer via oslo.messaging. Usage: Send messages with samples generated by make_test_data source .tox/py27/bin/activate ./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering """ import argparse import datetime import functools import json import random import uuid import make_test_data import oslo_messaging from six import moves from ceilometer import messaging from ceilometer.publisher import utils from ceilometer import service def send_batch_notifier(notifier, topic, batch): notifier.sample({}, event_type=topic, payload=batch) def get_notifier(conf): return oslo_messaging.Notifier( messaging.get_transport(conf), driver='messagingv2', publisher_id='telemetry.publisher.test', topics=['metering'], ) def generate_data(conf, send_batch, make_data_args, samples_count, batch_size, resources_count, topic): make_data_args.interval = 1 make_data_args.start = (datetime.datetime.utcnow() - datetime.timedelta(minutes=samples_count)) make_data_args.end = datetime.datetime.utcnow() make_data_args.resource_id = None resources_list = [str(uuid.uuid4()) for _ in moves.xrange(resources_count)] resource_samples = {resource: 0 for resource in resources_list} batch = [] count = 0 for sample in make_test_data.make_test_data(conf, **make_data_args.__dict__): count += 1 resource = resources_list[random.randint(0, len(resources_list) - 1)] resource_samples[resource] += 1 sample['resource_id'] = resource # need to change the timestamp from datetime.datetime type to iso # format (unicode type), because collector will change iso format # timestamp to datetime.datetime type before recording to db. 
sample['timestamp'] = sample['timestamp'].isoformat() # need to recalculate signature because of the resource_id change sig = utils.compute_signature(sample, conf.publisher.telemetry_secret) sample['message_signature'] = sig batch.append(sample) if len(batch) == batch_size: send_batch(topic, batch) batch = [] if count == samples_count: send_batch(topic, batch) return resource_samples send_batch(topic, batch) return resource_samples def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( '--batch-size', dest='batch_size', type=int, default=100 ) parser.add_argument( '--config-file', default='/etc/ceilometer/ceilometer.conf' ) parser.add_argument( '--topic', default='perfmetering' ) parser.add_argument( '--samples-count', dest='samples_count', type=int, default=1000 ) parser.add_argument( '--resources-count', dest='resources_count', type=int, default=100 ) parser.add_argument( '--result-directory', dest='result_dir', default='/tmp' ) return parser def main(): args = get_parser().parse_known_args()[0] make_data_args = make_test_data.get_parser().parse_known_args()[0] conf = service.prepare_service(argv=['/', '--config-file', args.config_file]) notifier = get_notifier(conf) send_batch = functools.partial(send_batch_notifier, notifier) result_dir = args.result_dir del args.config_file del args.result_dir resource_writes = generate_data(conf, send_batch, make_data_args, **args.__dict__) result_file = "%s/sample-by-resource-%s" % (result_dir, random.getrandbits(32)) with open(result_file, 'w') as f: f.write(json.dumps(resource_writes)) return result_file if __name__ == '__main__': main() ceilometer-10.0.0/tools/__init__.py0000666000175100017510000000000013236733243017213 0ustar zuulzuul00000000000000ceilometer-10.0.0/tools/pretty_tox.sh0000777000175100017510000000065213236733243017677 0ustar zuulzuul00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 # --until-failure is not compatible with --subunit see: # # https://bugs.launchpad.net/testrepository/+bug/1411804 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then python setup.py testr --slowest --testr-args="$TESTRARGS" else python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi ceilometer-10.0.0/releasenotes/0000775000175100017510000000000013236733440016442 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/notes/0000775000175100017510000000000013236733440017572 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml0000666000175100017510000000041213236733243030150 0ustar zuulzuul00000000000000--- features: - > [`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. ceilometer-10.0.0/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml0000666000175100017510000000052413236733243025350 0ustar zuulzuul00000000000000--- features: - > Support for CORS is added. More information can be found [`here `_] upgrade: - > The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. ceilometer-10.0.0/releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml0000666000175100017510000000014413236733243026107 0ustar zuulzuul00000000000000--- features: - > Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'. 
ceilometer-10.0.0/releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml0000666000175100017510000000112113236733243027762 0ustar zuulzuul00000000000000--- features: - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000ceilometer-10.0.0/releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yamlceilometer-10.0.0/releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yam0000666000175100017510000000025213236733243033120 0ustar zuulzuul00000000000000--- deprecations: - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. ceilometer-10.0.0/releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml0000666000175100017510000000132713236733243030075 0ustar zuulzuul00000000000000--- features: - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. ceilometer-10.0.0/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml0000666000175100017510000000026713236733243025573 0ustar zuulzuul00000000000000--- features: - > Gnocchi dispatcher now uses client rather than direct http requests upgrade: - > gnocchiclient library is now a requirement if using ceilometer+gnocchi. ceilometer-10.0.0/releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml0000666000175100017510000000036413236733243027564 0ustar zuulzuul00000000000000--- features: - | In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set to True to activate sending meters and events in batches, or False (default value) to send each meter and event with a fresh HTTP call. ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000ceilometer-10.0.0/releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yamlceilometer-10.0.0/releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.ya0000666000175100017510000000022313236733243032774 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated `compute.workload_partitioning` option has been removed in favor of `compute.instance_discovery_method`. 
ceilometer-10.0.0/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml0000666000175100017510000000010313236733243025007 0ustar zuulzuul00000000000000--- features: - > Add support for Keystone v3 authentication ceilometer-10.0.0/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml0000666000175100017510000000031213236733243025566 0ustar zuulzuul00000000000000--- fixes: - > [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. ceilometer-10.0.0/releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml0000666000175100017510000000106613236733243027471 0ustar zuulzuul00000000000000--- prelude: > Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. deprecations: - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. ceilometer-10.0.0/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml0000666000175100017510000000054513236733243032423 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. ceilometer-10.0.0/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml0000666000175100017510000000012713236733243025522 0ustar zuulzuul00000000000000--- features: - > Remove eventlet from Ceilometer in favour of threaded approach ceilometer-10.0.0/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml0000666000175100017510000000031413236733243030307 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. ceilometer-10.0.0/releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml0000666000175100017510000000025213236733243027054 0ustar zuulzuul00000000000000--- features: - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. ceilometer-10.0.0/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml0000666000175100017510000000025213236733243031255 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. ceilometer-10.0.0/releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml0000666000175100017510000000072513236733243027621 0ustar zuulzuul00000000000000--- features: - | Ceilometer supports generic notifier to publish data and allow user to customize parameters such as topic, transport driver and priority. The publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit driver, but also other driver like kafka can be used. deprecations: - | Kafka publisher is deprecated to use generic notifier instead. 
ceilometer-10.0.0/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml0000666000175100017510000000033513236733243027431 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load ceilometer-10.0.0/releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml0000666000175100017510000000017213236733243026126 0ustar zuulzuul00000000000000--- deprecations: - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-10.0.0/releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yamlceilometer-10.0.0/releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b10000666000175100017510000000024413236733243033122 0ustar zuulzuul00000000000000--- deprecations: - | The `shuffle_time_before_polling_task` option has been removed. This option never worked in the way it was originally intended too. ceilometer-10.0.0/releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml0000666000175100017510000000070213236733243031042 0ustar zuulzuul00000000000000--- features: - | Because of deprecating the collector, the default publishers in pipeline.yaml and event_pipeline.yaml are now changed using database instead of notifier. deprecations: - | Collector is no longer supported in this release. The collector introduces lags in pushing data to backend. To optimize the architecture, Ceilometer push data through dispatchers using publishers in notification agent directly. ceilometer-10.0.0/releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml0000666000175100017510000000056513236733243031113 0ustar zuulzuul00000000000000--- deprecations: - | Allow users to add additional exchanges in ceilometer.conf instead of hardcoding exchanges. Now original http_control_exchanges is being deprecated and renamed notification_control_exchanges. Besides, the new option is integrated with other exchanges in default EXCHANGE_OPTS to make it available to extend additional exchanges. ceilometer-10.0.0/releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml0000666000175100017510000000022413236733243030205 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1597618 `_] Add the full support of snmp v3 user security model. ceilometer-10.0.0/releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml0000666000175100017510000000154113236733243027752 0ustar zuulzuul00000000000000--- features: - | Workload partitioning of notification agent is now split into queues based on pipeline type (sample, event, etc...) rather than per individual pipeline. This will save some memory usage specifically for pipeline definitions with many source/sink combinations. upgrade: - | If workload partitioning of the notification agent is enabled, the notification agent should not run alongside pre-Queens agents. Doing so may result in missed samples when leveraging transformations. To upgrade without loss of data, set `notification_control_exchanges` option to empty so only existing `ceilometer-pipe-*` queues are processed. Once cleared, reset `notification_control_exchanges` option and launch the new notification agent(s). If `workload_partitioning` is not enabled, no special steps are required. 
ceilometer-10.0.0/releasenotes/notes/drop-collector-4c207b35d67b2977.yaml0000666000175100017510000000055313236733243025477 0ustar zuulzuul00000000000000--- upgrade: - | The collector service is removed. From Ocata, it's possible to edit the pipeline.yaml and event_pipeline.yaml files and modify the publisher to provide the same functionality as collector dispatcher. You may change publisher to 'gnocchi', 'http', 'panko', or any combination of available publishers listed in documentation. ceilometer-10.0.0/releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml0000666000175100017510000000036613236733243027725 0ustar zuulzuul00000000000000--- features: - | A new option named `max_parallel_requests` is available to control the maximum number of parallel requests that can be executed by the agents. This option also replaces the `poolsize` option of the HTTP publisher. ceilometer-10.0.0/releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml0000666000175100017510000000036213236733243030677 0ustar zuulzuul00000000000000--- features: - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. upgrade: - > The option ``glance_page_size`` has been removed because it's not actually needed. ceilometer-10.0.0/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml0000666000175100017510000000112013236733243025577 0ustar zuulzuul00000000000000--- critical: - > The previous configuration options default for ``requeue_sample_on_dispatcher_error`` and ``requeue_event_on_dispatcher_error`` allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." upgrade: - > The options ``requeue_event_on_dispatcher_error`` and ``requeue_sample_on_dispatcher_error`` have been enabled and removed. ceilometer-10.0.0/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml0000666000175100017510000000023713236733243026664 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1388680 `_] Suppose ability to query for None value when using SQL backend. ceilometer-10.0.0/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml0000666000175100017510000000037313236733243026136 0ustar zuulzuul00000000000000--- features: - > Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. upgrade: - > audit middleware in keystonemiddleware library should be used for similar support. ceilometer-10.0.0/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml0000666000175100017510000000023613236733243027053 0ustar zuulzuul00000000000000--- upgrade: - > gnocchi_resources.yaml in Ceilometer should be updated. fixes: - > Fix samples from Heat to map to correct Gnocchi resource type ceilometer-10.0.0/releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml0000666000175100017510000000055413236733243027741 0ustar zuulzuul00000000000000--- upgrade: - | The Gnocchi dispatcher has been removed and replaced by a native Gnocchi publisher. The configuration options from the `[dispatcher_gnocchi]` has been removed and should be passed via the URL in `pipeline.yaml`. The service authentication override can be done by adding specific credentials to a `[gnocchi]` section instead. 
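To illustrate the dispatcher-to-publisher move described above, a minimal sketch; the query parameters mirror the old [dispatcher_gnocchi] option names and the credential keys are the usual keystoneauth ones, both assumptions rather than an exhaustive reference:

    # pipeline.yaml fragment
    publishers:
        - gnocchi://?archive_policy=low&filter_project=service

    # ceilometer.conf fragment overriding the service credentials used to
    # reach Gnocchi
    [gnocchi]
    auth_type = password
    auth_url = http://controller/identity
    username = ceilometer
    project_name = service
    user_domain_name = Default
    project_domain_name = Default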
ceilometer-10.0.0/releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml0000666000175100017510000000012013236733243024667 0ustar zuulzuul00000000000000--- deprecations: - | Previously deprecated kwapi meters are not removed. ceilometer-10.0.0/releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml0000666000175100017510000000051413236733243031004 0ustar zuulzuul00000000000000--- upgrade: - | Ceilometer legacy backends and Ceilometer API are now deprecated. Polling all nova instances from compute agent is no more required with Gnocchi. So we switch the [compute]instance_discovery_method to libvirt_metadata. To switch back to the old deprecated behavior you can set it back to 'naive'. ceilometer-10.0.0/releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml0000666000175100017510000000035513236733243031173 0ustar zuulzuul00000000000000--- deprecations: - | disk.* aggregated metrics for instance are deprecated, in favor of the per disk metrics (disk.device.*). Now, it's up to the backend to provide such aggregation feature. Gnocchi already provides this. ceilometer-10.0.0/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml0000666000175100017510000000021713236733243026773 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector ceilometer-10.0.0/releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml0000666000175100017510000000011013236733243027121 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated file dispatcher has been removed. ceilometer-10.0.0/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml0000666000175100017510000000032213236733243026661 0ustar zuulzuul00000000000000--- features: - > [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. ceilometer-10.0.0/releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml0000666000175100017510000000022713236733243026420 0ustar zuulzuul00000000000000--- deprecations: - | Usage of pipeline.yaml for polling configuration is now deprecated. The dedicated polling.yaml should be used instead. ceilometer-10.0.0/releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml0000666000175100017510000000107013236733243027501 0ustar zuulzuul00000000000000--- upgrade: - Batching is enabled by default now when coordinated workers are enabled. Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. fixes: - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. ceilometer-10.0.0/releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml0000666000175100017510000000015013236733243027157 0ustar zuulzuul00000000000000--- features: - Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap.out'. ceilometer-10.0.0/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml0000666000175100017510000000034113236733243026656 0ustar zuulzuul00000000000000--- critical: - > [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. 
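The [compute]/instance_discovery_method switch described in the instance-discovery note above translates to a ceilometer.conf fragment along these lines:

    [compute]
    # New default when Gnocchi is the backend; reads instance metadata from
    # libvirt instead of polling the Nova API for every instance.
    instance_discovery_method = libvirt_metadata
    # Uncomment to restore the old, deprecated behaviour:
    # instance_discovery_method = naive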
ceilometer-10.0.0/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml0000666000175100017510000000061213236733243027314 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. ceilometer-10.0.0/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml0000666000175100017510000000037413236733243030334 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. ceilometer-10.0.0/releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml0000666000175100017510000000012213236733243030713 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated control exchange options have been removed. ceilometer-10.0.0/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml0000666000175100017510000000060013236733243030176 0ustar zuulzuul00000000000000--- upgrade: - > To utilize the new policy support. The policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. fixes: - > [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. ceilometer-10.0.0/releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml0000666000175100017510000000042513236733243026374 0ustar zuulzuul00000000000000--- features: - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. ceilometer-10.0.0/releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml0000666000175100017510000000014613236733243027517 0ustar zuulzuul00000000000000--- other: - The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade instead. ceilometer-10.0.0/releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml0000666000175100017510000000011013236733243027472 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated http dispatcher has been removed. ceilometer-10.0.0/releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml0000666000175100017510000000012513236733243030203 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated `nova_http_log_debug` option has been removed. ceilometer-10.0.0/releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml0000666000175100017510000000034113236733243032315 0ustar zuulzuul00000000000000--- upgrade: - > Add a tool for migrating metrics data from Ceilometer's native storage to Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the recommended metrics data storage backend. ceilometer-10.0.0/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml0000666000175100017510000000020713236733243026021 0ustar zuulzuul00000000000000--- features: - > Ceilometer alarms code is now fully removed from code base. Equivalent functionality is handled by Aodh. 
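For tooling that still calls the removed ceilometer-dbsync (see the remove-ceilometer-dbsync note above), the replacement is simply the renamed binary; a minimal sketch, assuming the default configuration path:

    # previously: ceilometer-dbsync
    ceilometer-upgrade --config-file /etc/ceilometer/ceilometer.conf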
ceilometer-10.0.0/releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml0000666000175100017510000000113613236733243030142 0ustar zuulzuul00000000000000--- upgrade: - Configuration values can passed in via the querystring of publisher in pipeline. For example, rather than setting target, timeout, verify_ssl, and batch_mode under [dispatcher_http] section of conf, you can specify http:///?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only the raw details of event are required. deprecations: - As the collector service is being deprecated, the duplication of publishers and dispatchers is being addressed. The http dispatcher is now marked as deprecated and the recommended path is to use http publisher. ceilometer-10.0.0/releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml0000666000175100017510000000140713236733243030662 0ustar zuulzuul00000000000000--- features: - In the 'publishers' section of a meter/event pipeline definition, https:// can now be used in addition to http://. Furthermore, either Basic or client-certificate authentication can be used (obviously, client cert only makes sense in the https case). For Basic authentication, use the form http://username:password@hostname/. For client certificate authentication pass the client certificate's path (and the key file path, if the key is not in the certificate file) using the parameters 'clientcert' and 'clientkey', e.g. https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any parameters or credentials used for http(s) publishers are removed from the URL before the actual HTTP request is made. ceilometer-10.0.0/releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml0000666000175100017510000000010713236733243027314 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated Ceilometer API has been removed. ceilometer-10.0.0/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml0000666000175100017510000000117413236733243027474 0ustar zuulzuul00000000000000--- features: - > Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. issues: - > Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generate a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. upgrade: - > By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000ceilometer-10.0.0/releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yamlceilometer-10.0.0/releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e8030000666000175100017510000000022113236733243033122 0ustar zuulzuul00000000000000--- features: - Add support of metering the size of cinder volume/snapshot/backup. Like other meters, these are useful for billing system. ceilometer-10.0.0/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml0000666000175100017510000000066413236733243027405 0ustar zuulzuul00000000000000--- critical: - > [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. 
It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. ceilometer-10.0.0/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml0000666000175100017510000000035113236733243026241 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. ceilometer-10.0.0/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml0000666000175100017510000000022113236733243027323 0ustar zuulzuul00000000000000--- features: - > [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml ceilometer-10.0.0/releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml0000666000175100017510000000037613236733243031177 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timetamping in pollsters. ceilometer-10.0.0/releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml0000666000175100017510000000012713236733243027540 0ustar zuulzuul00000000000000--- upgrade: - | Remove direct publisher and use the explicit publisher instead. ceilometer-10.0.0/releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml0000666000175100017510000000034713236733243027754 0ustar zuulzuul00000000000000--- upgrade: - | Ceilometer now leverages the latest distribution mechanism provided by the tooz library. Therefore the options `coordination.retry_backoff` and `coordination.max_retry_interval` do not exist anymore. ceilometer-10.0.0/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml0000666000175100017510000000055413236733243027315 0ustar zuulzuul00000000000000 --- fixes: - > [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurrence of a meter will be used and any following duplicates will be skipped from processing. ceilometer-10.0.0/releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml0000666000175100017510000000030513236733243030126 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated support of configure polling in the `pipeline.yaml` file has been removed. Ceilometer now only uses the `polling.yaml` file for polling configuration. ceilometer-10.0.0/releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml0000666000175100017510000000042213236733243032076 0ustar zuulzuul00000000000000--- prelude: > Network Statistics From OpenDaylight. features: - Add a ceilometer driver to collect network statistics information using REST APIs exposed by network-statistics module in OpenDaylight. - Add support for network statistics meters with gnocchi ceilometer-10.0.0/releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml0000666000175100017510000000065113236733243027364 0ustar zuulzuul00000000000000--- deprecations: - | The pipeline dynamic refresh code has been removed. Ceilometer relies on the cotyledon library for a few releases which provides reload functionality by sending the SIGHUP signal to the process. This achieves the same feature while making sure the reload is explicit once the file is correctly and entirely written to the disk, avoiding the failing load of half-written files. 
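The explicit reload flow referred to in the remove-refresh-pipeline note above can be exercised as follows; a sketch that assumes the notification agent runs on the host as a process named ceilometer-agent-notification:

    # write the new pipeline definition completely, then ask the running
    # agent to reload it
    vi /etc/ceilometer/pipeline.yaml
    kill -HUP $(pgrep -f ceilometer-agent-notification)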
ceilometer-10.0.0/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml0000666000175100017510000000050613236733243030237 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to appropriate type to handle large data. ceilometer-10.0.0/releasenotes/notes/polling-definition-efffb92e3810e571.yaml0000666000175100017510000000112013236733243026551 0ustar zuulzuul00000000000000--- upgrade: - Pipeline processing in polling agents was removed in Liberty cycle. A new polling specific definition file is created to handle polling functionality and pipeline definition file is now reserved exclusively for transformations and routing. The polling.yaml file follows the same syntax as the pipeline.yaml but only handles polling attributes such as interval, discovery, resources, meter matching. It is configured by setting cfg_file under the polling section.If no polling definition file is found, it will fallback to reuse pipeline_cfg_file. ceilometer-10.0.0/releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml0000666000175100017510000000017713236733243027671 0ustar zuulzuul00000000000000--- features: - The tenant (project) discovery code in the polling agent now scans for tenants in all available domains. ceilometer-10.0.0/releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml0000666000175100017510000000014713236733243030516 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated kafka publisher has been removed, use NotifierPublisher instead. ceilometer-10.0.0/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml0000666000175100017510000000100613236733243025664 0ustar zuulzuul00000000000000--- features: - > Add support for batch processing of messages from queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. upgrade: - > batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. ceilometer-10.0.0/releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml0000666000175100017510000000020413236733243027573 0ustar zuulzuul00000000000000--- deprecations: - Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi (metrics), and/or Panko (events). ceilometer-10.0.0/releasenotes/notes/.placeholder0000666000175100017510000000000013236733243022046 0ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml0000666000175100017510000000012613236733243026001 0ustar zuulzuul00000000000000--- features: - Add a new publisher for pushing samples or events to a Zaqar queue. ceilometer-10.0.0/releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml0000666000175100017510000000026213236733243027150 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. ceilometer-10.0.0/releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml0000666000175100017510000000065413236733243027212 0ustar zuulzuul00000000000000--- fixes: - | A local cache is used when polling instance metrics to minimise calls Nova API. 
A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. This is only relevant when `instance_discovery_method` is set to `naive`. It is recommended to use `libvirt_metadata` if possible. ceilometer-10.0.0/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml0000666000175100017510000000054513236733243030373 0ustar zuulzuul00000000000000--- features: - > [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutually exclusively, rather than capturing both always. other: - > Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in configuration file. ceilometer-10.0.0/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml0000666000175100017510000000054213236733243025276 0ustar zuulzuul00000000000000--- features: - > Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. other: - > A dogpile.cache supported backend is required to enable cache. Additional configuration `options `_ are also required. ceilometer-10.0.0/releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml0000666000175100017510000000226013236733243026330 0ustar zuulzuul00000000000000--- features: - The Ceilometer compute agent can now retrieve some instance metadata from the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova fills this metadata with some information about the instance. To enable this feature you should set [compute]/instance_discovery_method = libvirt_metadata in the configuration file. The only downside of this method is that user_metadata (and some other instance attributes) are no longer part of the samples created by the agent. But when Gnocchi is used as backend, this is not an issue since Gnocchi doesn't store resource metadata aside of the measurements. And the missing informations are still retrieved through the Nova notifications and will fully update the resource information in Gnocchi. upgrade: - If you are using Gnocchi as backend it's strongly recommended to switch [compute]/instance_discovery_method to libvirt_metadata. This will reduce the load on the Nova API especially if you have many compute nodes. deprecations: - The [compute]/workload_partitioning = True is deprecated in favor of [compute]/instance_discovery_method = workload_partitioning ceilometer-10.0.0/releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml0000666000175100017510000000074413236733243026274 0ustar zuulzuul00000000000000--- prelude: > In an effort to minimise the noise, Ceilometer will no longer produce meters which have no measurable data associated with it. Image meter only captures state information which is already captured in events and other meters. upgrade: - Any existing commands utilising `image` meter should be switched to `image.size` meter which will provide equivalent functionality deprecations: - The `image` meter is dropped in favour of `image.size` meter. ceilometer-10.0.0/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml0000666000175100017510000000024513236733243026046 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1254800 `_] Add better support to catch race conditions when creating event_types ceilometer-10.0.0/releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml0000666000175100017510000000066713236733243026421 0ustar zuulzuul00000000000000--- prelude: > Samples are required to measure some aspect of a resource. 
Samples not measuring anything will be dropped. upgrade: - The `instance` meter no longer will be generated. For equivalent functionality, perform the exact same query on any compute meter such as `cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc... deprecations: - The `instance` meter no longer will be generated. ceilometer-10.0.0/releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml0000666000175100017510000000037313236733243027623 0ustar zuulzuul00000000000000--- deprecations: - With collector service being deprecated, we now have to address the duplication between dispatchers and publishers. The file dispatcher is now marked as deprecated. Use the file publisher to push samples into a file. ceilometer-10.0.0/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml0000666000175100017510000000033213236733243026536 0ustar zuulzuul00000000000000--- upgrade: - > Run db-sync to add new indices. fixes: - > [`bug 1526793 `_] Additional indices were added to better support querying of event data. ceilometer-10.0.0/releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml0000666000175100017510000000024713236733243027571 0ustar zuulzuul00000000000000--- deprecations: - | Deprecating support for enabling pollsters via command line. Meter and pollster enablement should be configured via polling.yaml file. ceilometer-10.0.0/releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml0000666000175100017510000000025213236733243027623 0ustar zuulzuul00000000000000--- features: - | Add support to capture volume capacity usage details from cinder. This data is extracted from notifications sent by Cinder starting in Ocata. ceilometer-10.0.0/releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml0000666000175100017510000000036213236733243033102 0ustar zuulzuul00000000000000--- features: - | Support loading multiple meter definition files and allow users to add their own meter definitions into several files according to different types of metrics under the directory of /etc/ceilometer/meters.d.ceilometer-10.0.0/releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml0000666000175100017510000000031113236733243030154 0ustar zuulzuul00000000000000--- other: - >- The Events API (exposed at /v2/events) which was deprecated has been removed. The Panko project is now responsible for providing this API and can be installed separately. ceilometer-10.0.0/releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml0000666000175100017510000000103413236733243026230 0ustar zuulzuul00000000000000--- upgrade: - | Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*` deprecations: - | Previously, to enable/disable radosgw.* meters, you must define entry_point name rather than meter name. This is corrected so you do not need to be aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and will be removed in Rocky. fixes: - | Fix ability to enable/disable radosgw.* meters explicitly ceilometer-10.0.0/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml0000666000175100017510000000063513236733243030140 0ustar zuulzuul00000000000000--- features: - > To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. 
The agent will continue to poll the discovered instances at the interval defined by pipeline. ceilometer-10.0.0/releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml0000666000175100017510000000063213236733243031231 0ustar zuulzuul00000000000000--- features: - | The notification-agent can now be configured to either build meters or events. By default, the notification agent will continue to load both pipelines and build both data models. To selectively enable a pipeline, configure the `pipelines` option under the `[notification]` section. Addition pipelines can be created following the format used by existing pipelines. ceilometer-10.0.0/releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml0000666000175100017510000000016213236733243026743 0ustar zuulzuul00000000000000--- features: - | Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio details. ceilometer-10.0.0/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml0000666000175100017510000000054113236733243030506 0ustar zuulzuul00000000000000--- fixes: - > [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. ceilometer-10.0.0/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml0000666000175100017510000000051013236733243026631 0ustar zuulzuul00000000000000--- features: - > RPC collector support is dropped. The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. upgrade: - > Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. ceilometer-10.0.0/releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml0000666000175100017510000000007613236733243026742 0ustar zuulzuul00000000000000--- upgrade: - The api.pecan_debug option has been removed. ceilometer-10.0.0/releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml0000666000175100017510000000024513236733243031017 0ustar zuulzuul00000000000000--- fixes: - The transport_url defined in [oslo_messaging_notifications] was never used, which contradicts the oslo_messaging documentation. This is now fixed.ceilometer-10.0.0/releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml0000666000175100017510000000011713236733243027155 0ustar zuulzuul00000000000000--- upgrade: - | The deprecated `pollster-list` option has been removed. ceilometer-10.0.0/releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml0000666000175100017510000000036513236733243026034 0ustar zuulzuul00000000000000--- other: - | Ship YAML files to ceilometer/pipeline/data/ make it convenient to update all the files on upgrade. Users can copy yaml files from /usr/share/ceilometer and customise their own files located in /etc/ceilometer/. ceilometer-10.0.0/releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml0000666000175100017510000000042713236733243032123 0ustar zuulzuul00000000000000--- features: - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. 
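Following the ship-yaml-files note above, customisation starts from the packaged defaults; a sketch, assuming the files were installed under /usr/share/ceilometer as described:

    # copy the shipped defaults and edit only the local copies
    cp /usr/share/ceilometer/*.yaml /etc/ceilometer/
    # e.g. adjust polling intervals in /etc/ceilometer/polling.yaml afterwards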
ceilometer-10.0.0/releasenotes/source/0000775000175100017510000000000013236733440017742 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/newton.rst0000666000175100017510000001614213236733243022015 0ustar zuulzuul00000000000000==================== Newton Release Notes ==================== 7.0.5 ===== Bug Fixes --------- .. releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml @ 66dd8ab65e2d9352de86e47056dea0b701e21a15 - A local cache is used when polling instance metrics to minimise calls Nova API. A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. 7.0.1 ===== New Features ------------ .. releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml @ 032032642ad49e01d706f19f51d672fcff403442 - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. 7.0.0 ===== Prelude ------- .. releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ 18c181f0b3ce07a0cd552a9060dd09a95cc26078 Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. New Features ------------ .. releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml @ cf3f7c992e0d29e06a7bff6c1df2f0144418d80f - Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'. .. releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml @ 2fca7ebd7c6a4d29c8a320fffd035ed9814e8293 - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification .. releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml @ ed7b6dbc952e49ca69de9a94a01398b106aece4b - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. .. releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml @ aaedbbe0eb02ad1f86395a5a490495b64ce26777 - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. .. 
releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml @ a2a04e5d234ba358c25d541f31f8ca1a61bfd5d8 - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 40684dafae76eab77b66bb1da7e143a3d7e2c9c8 - The options 'requeue_event_on_dispatcher_error' and 'requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ 5750fddf288c749cacfc825753928f66e755758d - Batching is enabled by default now when coordinated workers are enabled. Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a - The option 'glance_page_size' has been removed because it's not actually needed. Deprecation Notes ----------------- .. releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml @ 3685dcf417543db0bb708b347e996d88385c8c5b - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. .. releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml @ 2bb81d41f1c5086b68b1290362c72966c1e33702 - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. .. releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ 18c181f0b3ce07a0cd552a9060dd09a95cc26078 - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. Critical Issues --------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 40684dafae76eab77b66bb1da7e143a3d7e2c9c8 - The previous configuration options default for 'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." Bug Fixes --------- .. releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml @ 800034dc0bbb9502893dedd9bcde7c170780c375 - [`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. .. releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml @ dc254e2f78a4bb42b0df6556df8347c7137ab5b2 - [`bug 1597618 `_] Add the full support of snmp v3 user security model. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ 5750fddf288c749cacfc825753928f66e755758d - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. .. 
releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml @ 8dd821a03dcff45258251bebfd2beb86c07d94f7 - [`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timetamping in pollsters. ceilometer-10.0.0/releasenotes/source/unreleased.rst0000666000175100017510000000015613236733243022630 0ustar zuulzuul00000000000000============================= Current Series Release Notes ============================= .. release-notes:: ceilometer-10.0.0/releasenotes/source/conf.py0000666000175100017510000002176013236733243021252 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Ceilometer Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ceilometer Release Notes' copyright = u'2015, Ceilometer Developers' # Release notes do not need a version number in the title, they # cover multiple releases. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # openstackdocstheme options repository_name = 'openstack/ceilometer' bug_project = 'ceilometer' bug_tag = '' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # Must set this variable to include year, month, day, hours, and minutes. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CeilometerReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CeilometerReleaseNotes.tex', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometerreleasenotes', u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CeilometerReleaseNotes', u'Ceilometer Release Notes Documentation', u'Ceilometer Developers', 'CeilometerReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ceilometer-10.0.0/releasenotes/source/ocata.rst0000666000175100017510000000021013236733243021557 0ustar zuulzuul00000000000000=========================== Ocata Series Release Notes =========================== .. release-notes:: :branch: origin/stable/ocata ceilometer-10.0.0/releasenotes/source/pike.rst0000666000175100017510000000021713236733243021427 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ceilometer-10.0.0/releasenotes/source/mitaka.rst0000666000175100017510000003332213236733243021750 0ustar zuulzuul00000000000000==================== Mitaka Release Notes ==================== 6.0.0 ===== New Features ------------ .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - Add support for batch processing of messages from queue. 
This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. .. releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. The agent will continue to poll the discovered instances at the interval defined by pipeline. .. releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutally exclusively, rather than capturing both always. .. releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - Support for CORS is added. More information can be found [`here `_] .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. .. releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Gnocchi dispatcher now uses client rather than direct http requests .. releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. .. releasenotes/notes/keystone-v3-fab1e257c5672965.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Add support for Keystone v3 authentication .. releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Ceilometer alarms code is now fully removed from code base. Equivalent functionality is handled by Aodh. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. .. releasenotes/notes/remove-eventlet-6738321434b60c78.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Remove eventlet from Ceilometer in favour of threaded approach .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - RPC collector support is dropped. The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. .. releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml .. releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. Known Issues ------------ .. 
releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generage a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 244439979fd28ecb0c76d132f0be784c988b54c8 - The options 'requeue_event_on_dispatcher_error' and 'requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. .. releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. .. releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - gnocchiclient library is now a requirement if using ceilometer+gnocchi. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - gnocchi_resources.yaml in Ceilometer should be updated. .. releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - To utilize the new policy support. The policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Run db-sync to add new indices. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - audit middleware in keystonemiddleware library should be used for similar support. .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. Critical Issues --------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 244439979fd28ecb0c76d132f0be784c988b54c8 - The previous configuration options default for 'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." .. releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml @ e84a10882a9b682ff41c84e8bf4ee2497e7e7a31 - [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. 
This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. .. releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. Bug Fixes --------- .. releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. .. releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. .. releasenotes/notes/event-type-race-c295baf7f1661eab.yaml @ 0e3ae8a667d9b9d6e19a7515854eb1703fc05013 - [`bug 1254800 `_] Add better support to catch race conditions when creating event_types .. releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. .. releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml @ 1f9f4e1072a5e5037b93734bafcc65e4211eb19f - [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. .. releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. .. releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Fix samples from Heat to map to correct Gnocchi resource type .. releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector .. releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml @ 02b1e1399bf885d03113a1cc125b1f97ed5540b9 - [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. .. releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1526793 `_] Additional indices were added to better support querying of event data. .. 
releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml @ 903a0a527cb240cfd9462b7f56d3463db7128993 - [`bug 1536699 `_] Patch to fix volume field lookup in the meter definition file. In case the field is missing in the definition, it raises a KeyError and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. .. releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to the appropriate type to handle large data. .. releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml @ 0c6f11cf88bf1a13a723879de46ec616678d2e0b - [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurrence of a meter will be used and any following duplicates will be skipped from processing. .. releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load. .. releasenotes/notes/support-None-query-45abaae45f08eda4.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1388680 `_] Support the ability to query for None values when using the SQL backend. Other Notes ----------- .. releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in the configuration file. .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - A dogpile.cache supported backend is required to enable caching. Additional configuration `options `_ are also required. ceilometer-10.0.0/releasenotes/source/_static/0000775000175100017510000000000013236733440021370 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/_static/.placeholder0000666000175100017510000000000013236733243023644 0ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/0000775000175100017510000000000013236733440021201 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/fr/0000775000175100017510000000000013236733440021610 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175100017510000000000013236733440023375 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000666000175100017510000000253613236733243026437 0ustar zuulzuul00000000000000# Gérald LONLAS , 2016.
#zanata msgid "" msgstr "" "Project-Id-Version: Ceilometer Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-11-21 04:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:24+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Ceilometer Release Notes" msgstr "Note de release de Ceilometer" msgid "Critical Issues" msgstr "Erreurs critiques" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Known Issues" msgstr "Problèmes connus" msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Other Notes" msgstr "Autres notes" msgid "Start using reno to manage release notes." msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Upgrade Notes" msgstr "Notes de mises à jours" ceilometer-10.0.0/releasenotes/source/locale/en_GB/0000775000175100017510000000000013236733440022153 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013236733440023740 5ustar zuulzuul00000000000000ceilometer-10.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000666000175100017510000015446213236733243027004 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: Ceilometer Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-05 18:54+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-06 01:12+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en-GB\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.1" msgstr "7.0.1" msgid "7.0.5" msgstr "7.0.5" msgid "8.0.0" msgstr "8.0.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.0.0-197" msgstr "9.0.0-197" msgid "" "A dogpile.cache supported backend is required to enable cache. Additional " "configuration `options `_ are also required." msgstr "" "A dogpile.cache supported backend is required to enable cache. Additional " "configuration `options `_ are also required." msgid "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache." msgstr "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache." msgid "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. 
A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache. This is only relevant when " "`instance_discovery_method` is set to `naive`. It is recommended to use " "`libvirt_metadata` if possible." msgstr "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache. This is only relevant when " "`instance_discovery_method` is set to `naive`. It is recommended to use " "`libvirt_metadata` if possible." msgid "" "A new option named `max_parallel_requests` is available to control the " "maximum number of parallel requests that can be executed by the agents. This " "option also replaces the `poolsize` option of the HTTP publisher." msgstr "" "A new option named `max_parallel_requests` is available to control the " "maximum number of parallel requests that can be executed by the agents. This " "option also replaces the `poolsize` option of the HTTP publisher." msgid "" "Add a ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgstr "" "Add a Ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgid "Add a new publisher for pushing samples or events to a Zaqar queue." msgstr "Add a new publisher for pushing samples or events to a Zaqar queue." msgid "" "Add a tool for migrating metrics data from Ceilometer's native storage to " "Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " "recommended metrics data storage backend." msgstr "" "Add a tool for migrating metrics data from Ceilometer's native storage to " "Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " "recommended metrics data storage backend." msgid "" "Add four new meters, including perf.cpu.cycles for the number of cpu cycles " "one instruction needs, perf.instructions for the count of instructions, perf." "cache_references for the count of cache hits and cache_misses for the count " "of caches misses." msgstr "" "Add four new meters, including perf.cpu.cycles for the number of cpu cycles " "one instruction needs, perf.instructions for the count of instructions, perf." "cache_references for the count of cache hits and cache_misses for the count " "of caches misses." msgid "" "Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." "out'." msgstr "" "Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." "out'." msgid "Add support for Keystone v3 authentication" msgstr "Add support for Keystone v3 authentication" msgid "" "Add support for batch processing of messages from queue. This will allow the " "collector and notification agent to grab multiple messages per thread to " "enable more efficient processing." msgstr "" "Add support for batch processing of messages from queue. This will allow the " "collector and notification agent to grab multiple messages per thread to " "enable more efficient processing." 
msgid "Add support for network statistics meters with gnocchi" msgstr "Add support for network statistics meters with Gnocchi" msgid "" "Add support of batch recording metering data to mongodb backend, since the " "pymongo support *insert_many* interface which can be used to batch record " "items, in \"big-data\" scenarios, this change can improve the performance of " "metering data recording." msgstr "" "Add support of batch recording metering data to MongoDB backend, since the " "pymongo support *insert_many* interface which can be used to batch record " "items, in \"big-data\" scenarios, this change can improve the performance of " "metering data recording." msgid "" "Add support of metering the size of cinder volume/snapshot/backup. Like " "other meters, these are useful for billing system." msgstr "" "Add support of metering the size of Cinder volume/snapshot/backup. Like " "other meters, these are useful for billing system." msgid "" "Add two new meters, including memory.bandwidth.total and memory.bandwidth." "local, to get memory bandwidth statistics based on Intel CMT feature." msgstr "" "Add two new meters, including memory.bandwidth.total and memory.bandwidth." "local, to get memory bandwidth statistics based on Intel CMT feature." msgid "Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'." msgstr "" "Added support for Magnum bay CRUD events, event_type is 'magnum.bay.*'." msgid "" "Addition pipelines can be created following the format used by existing " "pipelines." msgstr "" "Addition pipelines can be created following the format used by existing " "pipelines." msgid "" "Allow users to add additional exchanges in ceilometer.conf instead of " "hardcoding exchanges. Now original http_control_exchanges is being " "deprecated and renamed notification_control_exchanges. Besides, the new " "option is integrated with other exchanges in default EXCHANGE_OPTS to make " "it available to extend additional exchanges." msgstr "" "Allow users to add additional exchanges in ceilometer.conf instead of " "hardcoding exchanges. Now original http_control_exchanges is being " "deprecated and renamed notification_control_exchanges. Besides, the new " "option is integrated with other exchanges in default EXCHANGE_OPTS to make " "it available to extend additional exchanges." msgid "" "Any existing commands utilising `image` meter should be switched to `image." "size` meter which will provide equivalent functionality" msgstr "" "Any existing commands utilising `image` meter should be switched to `image." "size` meter which will provide equivalent functionality" msgid "" "As the collector service is being deprecated, the duplication of publishers " "and dispatchers is being addressed. The http dispatcher is now marked as " "deprecated and the recommended path is to use http publisher." msgstr "" "As the collector service is being deprecated, the duplication of publishers " "and dispatchers is being addressed. The http dispatcher is now marked as " "deprecated and the recommended path is to use http publisher." msgid "" "Batching is enabled by default now when coordinated workers are enabled. " "Depending on load, it is recommended to scale out the number of " "`pipeline_processing_queues` to improve distribution. `batch_size` should " "also be configured accordingly." msgstr "" "Batching is enabled by default now when coordinated workers are enabled. " "Depending on load, it is recommended to scale out the number of " "`pipeline_processing_queues` to improve distribution. 
`batch_size` should " "also be configured accordingly." msgid "" "Because of deprecating the collector, the default publishers in pipeline." "yaml and event_pipeline.yaml are now changed using database instead of " "notifier." msgstr "" "Because of deprecating the collector, the default publishers in pipeline." "yaml and event_pipeline.yaml are now changed using database instead of " "notifier." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " "neutron_lbaas_version=v1 option to configuration file." msgstr "" "By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " "neutron_lbaas_version=v1 option to configuration file." msgid "" "Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " "(metrics), and/or Panko (events)." msgstr "" "Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " "(metrics), and/or Panko (events)." msgid "Ceilometer Release Notes" msgstr "Ceilometer Release Notes" msgid "" "Ceilometer alarms code is now fully removed from code base. Equivalent " "functionality is handled by Aodh." msgstr "" "Ceilometer alarms code is now fully removed from code base. Equivalent " "functionality is handled by Aodh." msgid "" "Ceilometer backends are no more only databases but also REST API like " "Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " "have been renamed ceilometer-upgrade. The new binary handles database schema " "upgrade like ceilometer-dbsync does, but it also handle any changes needed " "in configured ceilometer backends like Gnocchi." msgstr "" "Ceilometer backends are no more only databases but also REST API like " "Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " "have been renamed ceilometer-upgrade. The new binary handles database schema " "upgrade like ceilometer-dbsync does, but it also handle any changes needed " "in configured Ceilometer backends like Gnocchi." msgid "" "Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " "all nova instances from compute agent is no more required with Gnocchi. So " "we switch the [compute]instance_discovery_method to libvirt_metadata. To " "switch back to the old deprecated behavior you can set it back to 'naive'." msgstr "" "Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " "all nova instances from compute agent is no more required with Gnocchi. So " "we switch the [compute]instance_discovery_method to libvirt_metadata. To " "switch back to the old deprecated behaviour you can set it back to 'naive'." msgid "" "Ceilometer now leverages the latest distribution mechanism provided by the " "tooz library. Therefore the options `coordination.retry_backoff` and " "`coordination.max_retry_interval` do not exist anymore." msgstr "" "Ceilometer now leverages the latest distribution mechanism provided by the " "tooz library. Therefore the options `coordination.retry_backoff` and " "`coordination.max_retry_interval` do not exist any more." msgid "" "Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. " "The purpose of this middleware is to set up the request URL correctly in " "case there is a proxy (for instance, a loadbalancer such as HAProxy) in " "front of Ceilometer. 
So, for instance, when TLS connections are being " "terminated in the proxy, and one tries to get the versions from the / " "resource of Ceilometer, one will notice that the protocol is incorrect; It " "will show 'http' instead of 'https'. So this middleware handles such cases. " "Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " "by default and needs to be enabled via a configuration value." msgstr "" "Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. " "The purpose of this middleware is to set up the request URL correctly in " "case there is a proxy (for instance, a load balancer such as HAProxy) in " "front of Ceilometer. So, for instance, when TLS connections are being " "terminated in the proxy, and one tries to get the versions from the / " "resource of Ceilometer, one will notice that the protocol is incorrect; It " "will show 'http' instead of 'https'. So this middleware handles such cases. " "Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " "by default and needs to be enabled via a configuration value." msgid "" "Ceilometer supports generic notifier to publish data and allow user to " "customize parameters such as topic, transport driver and priority. The " "publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" "[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " "driver, but also other driver like kafka can be used." msgstr "" "Ceilometer supports generic notifier to publish data and allow user to " "customise parameters such as topic, transport driver and priority. The " "publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" "[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " "driver, but also other driver like Kafka can be used." msgid "" "Collector is no longer supported in this release. The collector introduces " "lags in pushing data to backend. To optimize the architecture, Ceilometer " "push data through dispatchers using publishers in notification agent " "directly." msgstr "" "Collector is no longer supported in this release. The collector introduces " "lags in pushing data to backend. To optimise the architecture, Ceilometer " "pushes data through dispatchers using publishers in notification agent " "directly." msgid "" "Configuration values can passed in via the querystring of publisher in " "pipeline. For example, rather than setting target, timeout, verify_ssl, and " "batch_mode under [dispatcher_http] section of conf, you can specify http://" "/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " "the raw details of event are required." msgstr "" "Configuration values can passed in via the querystring of publisher in " "pipeline. For example, rather than setting target, timeout, verify_ssl, and " "batch_mode under [dispatcher_http] section of conf, you can specify http://" "/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " "the raw details of event are required." msgid "" "Configure individual dispatchers by specifying meter_dispatchers and " "event_dispatchers in configuration file." msgstr "" "Configure individual dispatchers by specifying meter_dispatchers and " "event_dispatchers in configuration file." msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Deprecating support for enabling pollsters via command line. 
Meter and " "pollster enablement should be configured via polling.yaml file." msgstr "" "Deprecating support for enabling pollsters via command line. Meter and " "pollster enablement should be configured via polling.yaml file." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "Fix ability to enable/disable radosgw.* meters explicitly" msgstr "Fix ability to enable/disable radosgw.* meters explicitly" msgid "Fix samples from Heat to map to correct Gnocchi resource type" msgstr "Fix samples from Heat to map to correct Gnocchi resource type" msgid "" "Fix to improve handling messages in environments heavily backed up. " "Previously, notification handlers greedily grabbed messages from queues " "which could cause ordering issues. A fix was applied to sequentially process " "messages in a single thread to prevent ordering issues." msgstr "" "Fix to improve handling messages in environments heavily backed up. " "Previously, notification handlers greedily grabbed messages from queues " "which could cause ordering issues. A fix was applied to sequentially process " "messages in a single thread to prevent ordering issues." msgid "" "For backward compatibility reason we temporary keep ceilometer-dbsync, at " "least for one major version to ensure deployer have time update their " "tooling." msgstr "" "For backward compatibility reason we temporary keep ceilometer-dbsync, at " "least for one major version to ensure deployers have time update their " "tooling." msgid "Gnocchi dispatcher now uses client rather than direct http requests" msgstr "Gnocchi dispatcher now uses client rather than direct HTTP requests" msgid "" "If workload partitioning of the notification agent is enabled, the " "notification agent should not run alongside pre-Queens agents. Doing so may " "result in missed samples when leveraging transformations. To upgrade without " "loss of data, set `notification_control_exchanges` option to empty so only " "existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " "`notification_control_exchanges` option and launch the new notification " "agent(s). If `workload_partitioning` is not enabled, no special steps are " "required." msgstr "" "If workload partitioning of the notification agent is enabled, the " "notification agent should not run alongside pre-Queens agents. Doing so may " "result in missed samples when leveraging transformations. To upgrade without " "loss of data, set `notification_control_exchanges` option to empty so only " "existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " "`notification_control_exchanges` option and launch the new notification " "agent(s). If `workload_partitioning` is not enabled, no special steps are " "required." msgid "" "If you are using Gnocchi as backend it's strongly recommended to switch " "[compute]/instance_discovery_method to libvirt_metadata. This will reduce " "the load on the Nova API especially if you have many compute nodes." msgstr "" "If you are using Gnocchi as backend it's strongly recommended to switch " "[compute]/instance_discovery_method to libvirt_metadata. This will reduce " "the load on the Nova API especially if you have many compute nodes." msgid "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measureable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." 
msgstr "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measurable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." msgid "" "In the 'publishers' section of a meter/event pipeline definition, https:// " "can now be used in addition to http://. Furthermore, either Basic or client-" "certificate authentication can be used (obviously, client cert only makes " "sense in the https case). For Basic authentication, use the form http://" "username:password@hostname/. For client certificate authentication pass the " "client certificate's path (and the key file path, if the key is not in the " "certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " "https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " "parameters or credentials used for http(s) publishers are removed from the " "URL before the actual HTTP request is made." msgstr "" "In the 'publishers' section of a meter/event pipeline definition, https:// " "can now be used in addition to http://. Furthermore, either Basic or client-" "certificate authentication can be used (obviously, client cert only makes " "sense in the https case). For Basic authentication, use the form http://" "username:password@hostname/. For client certificate authentication pass the " "client certificate's path (and the key file path, if the key is not in the " "certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " "https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " "parameters or credentials used for http(s) publishers are removed from the " "URL before the actual HTTP request is made." msgid "" "In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " "to True to activate sending meters and events in batches, or False (default " "value) to send each meter and event with a fresh HTTP call." msgstr "" "In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " "to True to activate sending meters and events in batches, or False (default " "value) to send each meter and event with a fresh HTTP call." msgid "" "In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " "to True to use system-installed certificates (default value) or False to " "ignore certificate verification (use in development only!). verify_ssl can " "also be set to the location of a certificate file e.g. /some/path/cert.crt " "(use for self-signed certs) or to a directory of certificates. The value is " "passed as the 'verify' option to the underlying requests method, which is " "documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" "cert-verification" msgstr "" "In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " "to True to use system-installed certificates (default value) or False to " "ignore certificate verification (use in development only!). verify_ssl can " "also be set to the location of a certificate file e.g. /some/path/cert.crt " "(use for self-signed certs) or to a directory of certificates. The value is " "passed as the 'verify' option to the underlying requests method, which is " "documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" "cert-verification" msgid "Kafka publisher is deprecated to use generic notifier instead." msgstr "Kafka publisher is deprecated to use generic notifier instead." 
msgid "Known Issues" msgstr "Known Issues" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Mitaka Release Notes" msgstr "Mitaka Release Notes" msgid "Network Statistics From OpenDaylight." msgstr "Network Statistics From OpenDaylight." msgid "" "Neutron API is not designed to be polled against. When polling against " "Neutron is enabled, Ceilometer's polling agents may generage a significant " "load against the Neutron API. It is recommended that a dedicated API be " "enabled for polling while Neutron's API is improved to handle polling." msgstr "" "Neutron API is not designed to be polled against. When polling against " "Neutron is enabled, Ceilometer's polling agents may generate a significant " "load against the Neutron API. It is recommended that a dedicated API be " "enabled for polling while Neutron's API is improved to handle polling." msgid "New Features" msgstr "New Features" msgid "Newton Release Notes" msgstr "Newton Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Pipeline processing in polling agents was removed in Liberty cycle. A new " "polling specific definition file is created to handle polling functionality " "and pipeline definition file is now reserved exclusively for transformations " "and routing. The polling.yaml file follows the same syntax as the pipeline." "yaml but only handles polling attributes such as interval, discovery, " "resources, meter matching. It is configured by setting cfg_file under the " "polling section.If no polling definition file is found, it will fallback to " "reuse pipeline_cfg_file." msgstr "" "Pipeline processing in polling agents was removed in Liberty cycle. A new " "polling specific definition file is created to handle polling functionality " "and pipeline definition file is now reserved exclusively for transformations " "and routing. The polling.yaml file follows the same syntax as the pipeline." "yaml but only handles polling attributes such as interval, discovery, " "resources, meter matching. It is configured by setting cfg_file under the " "polling section.If no polling definition file is found, it will fallback to " "reuse pipeline_cfg_file." msgid "" "Pipeline.yaml files for agents should be updated to notifier:// or udp:// " "publishers. The rpc:// publisher is no longer supported." msgstr "" "Pipeline.yaml files for agents should be updated to notifier:// or udp:// " "publishers. The rpc:// publisher is no longer supported." msgid "Prelude" msgstr "Prelude" msgid "Previously deprecated kwapi meters are not removed." msgstr "Previously deprecated Kwapi meters are not removed." msgid "" "Previously, to enable/disable radosgw.* meters, you must define entry_point " "name rather than meter name. This is corrected so you do not need to be " "aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " "meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " "will be removed in Rocky." msgstr "" "Previously, to enable/disable radosgw.* meters, you must define entry_point " "name rather than meter name. This is corrected so you do not need to be " "aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " "meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " "will be removed in Rocky." msgid "" "RPC collector support is dropped. 
The queue-based notifier publisher and " "collector was added as the recommended alternative as of Icehouse cycle." msgstr "" "RPC collector support is dropped. The queue-based notifier publisher and " "collector was added as the recommended alternative as of Icehouse cycle." msgid "Remove direct publisher and use the explicit publisher instead." msgstr "Remove direct publisher and use the explicit publisher instead." msgid "Remove eventlet from Ceilometer in favour of threaded approach" msgstr "Remove eventlet from Ceilometer in favour of threaded approach" msgid "Run db-sync to add new indices." msgstr "Run db-sync to add new indices." msgid "" "Samples are required to measure some aspect of a resource. Samples not " "measuring anything will be dropped." msgstr "" "Samples are required to measure some aspect of a resource. Samples not " "measuring anything will be dropped." msgid "" "Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " "all the files on upgrade. Users can copy yaml files from /usr/share/" "ceilometer and customise their own files located in /etc/ceilometer/." msgstr "" "Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " "all the files on upgrade. Users can copy yaml files from /usr/share/" "ceilometer and customise their own files located in /etc/ceilometer/." msgid "" "Since the Glance v1 APIs won't be maintained any more, this change add the " "support of glance v2 in images pollsters." msgstr "" "Since the Glance v1 APIs won't be maintained any more, this change add the " "support of glance v2 in images pollsters." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "" "Support for CADF-only payload in HTTP dispatcher is dropped as audit " "middleware in pyCADF was dropped in Kilo cycle." msgstr "" "Support for CADF-only payload in HTTP dispatcher is dropped as audit " "middleware in pyCADF was dropped in Kilo cycle." msgid "" "Support for CORS is added. More information can be found [`here `_]" msgstr "" "Support for CORS is added. More information can be found [`here `_]" msgid "" "Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " "deprecated. The same metrics are available between v1 and v2." msgstr "" "Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " "deprecated. The same metrics are available between v1 and v2." msgid "" "Support loading multiple meter definition files and allow users to add their " "own meter definitions into several files according to different types of " "metrics under the directory of /etc/ceilometer/meters.d." msgstr "" "Support loading multiple meter definition files and allow users to add their " "own meter definitions into several files according to different types of " "metrics under the directory of /etc/ceilometer/meters.d." msgid "" "Support resource caching in Gnocchi dispatcher to improve write performance " "to avoid additional queries." msgstr "" "Support resource caching in Gnocchi dispatcher to improve write performance " "to avoid additional queries." msgid "" "The Ceilometer compute agent can now retrieve some instance metadata from " "the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " "fills this metadata with some information about the instance. To enable this " "feature you should set [compute]/instance_discovery_method = " "libvirt_metadata in the configuration file. 
The only downside of this method " "is that user_metadata (and some other instance attributes) are no longer " "part of the samples created by the agent. But when Gnocchi is used as " "backend, this is not an issue since Gnocchi doesn't store resource metadata " "aside of the measurements. And the missing informations are still retrieved " "through the Nova notifications and will fully update the resource " "information in Gnocchi." msgstr "" "The Ceilometer compute agent can now retrieve some instance metadata from " "the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " "fills this metadata with some information about the instance. To enable this " "feature you should set [compute]/instance_discovery_method = " "libvirt_metadata in the configuration file. The only downside of this method " "is that user_metadata (and some other instance attributes) are no longer " "part of the samples created by the agent. But when Gnocchi is used as " "backend, this is not an issue since Gnocchi doesn't store resource metadata " "aside of the measurements. And the missing information is still retrieved " "through the Nova notifications and will fully update the resource " "information in Gnocchi." msgid "" "The Events API (exposed at /v2/events) which was deprecated has been " "removed. The Panko project is now responsible for providing this API and can " "be installed separately." msgstr "" "The Events API (exposed at /v2/events) which was deprecated has been " "removed. The Panko project is now responsible for providing this API and can " "be installed separately." msgid "" "The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " "publisher. The configuration options from the `[dispatcher_gnocchi]` has " "been removed and should be passed via the URL in `pipeline.yaml`. The " "service authentication override can be done by adding specific credentials " "to a `[gnocchi]` section instead." msgstr "" "The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " "publisher. The configuration options from the `[dispatcher_gnocchi]` has " "been removed and should be passed via the URL in `pipeline.yaml`. The " "service authentication override can be done by adding specific credentials " "to a `[gnocchi]` section instead." msgid "" "The Kwapi pollsters are deprecated and will be removed in the next major " "version of Ceilometer." msgstr "" "The Kwapi pollsters are deprecated and will be removed in the next major " "version of Ceilometer." msgid "" "The [compute]/workload_partitioning = True is deprecated in favor of " "[compute]/instance_discovery_method = workload_partitioning" msgstr "" "The [compute]/workload_partitioning = True is deprecated in favour of " "[compute]/instance_discovery_method = workload_partitioning" msgid "The `image` meter is dropped in favour of `image.size` meter." msgstr "The `image` meter is dropped in favour of `image.size` meter." msgid "The `instance` meter no longer will be generated." msgstr "The `instance` meter no longer will be generated." msgid "" "The `instance` meter no longer will be generated. For equivalent " "functionality, perform the exact same query on any compute meter such as " "`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." msgstr "" "The `instance` meter no longer will be generated. For equivalent " "functionality, perform the exact same query on any compute meter such as " "`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." 
msgid "" "The `shuffle_time_before_polling_task` option has been removed. This option " "never worked in the way it was originally intended too." msgstr "" "The `shuffle_time_before_polling_task` option has been removed. This option " "never worked in the way it was originally intended to." msgid "" "The api-paste.ini file can be modified to include or exclude the CORs " "middleware. Additional configurations can be made to middleware as well." msgstr "" "The api-paste.ini file can be modified to include or exclude the CORs " "middleware. Additional configurations can be made to middleware as well." msgid "The api.pecan_debug option has been removed." msgstr "The api.pecan_debug option has been removed." msgid "" "The collector service is removed. From Ocata, it's possible to edit the " "pipeline.yaml and event_pipeline.yaml files and modify the publisher to " "provide the same functionality as collector dispatcher. You may change " "publisher to 'gnocchi', 'http', 'panko', or any combination of available " "publishers listed in documentation." msgstr "" "The collector service is removed. From Ocata, it's possible to edit the " "pipeline.yaml and event_pipeline.yaml files and modify the publisher to " "provide the same functionality as collector dispatcher. You may change " "publisher to 'gnocchi', 'http', 'panko', or any combination of available " "publishers listed in documentation." msgid "The deprecated Ceilometer API has been removed." msgstr "The deprecated Ceilometer API has been removed." msgid "" "The deprecated `compute.workload_partitioning` option has been removed in " "favor of `compute.instance_discovery_method`." msgstr "" "The deprecated `compute.workload_partitioning` option has been removed in " "favour of `compute.instance_discovery_method`." msgid "The deprecated `nova_http_log_debug` option has been removed." msgstr "The deprecated `nova_http_log_debug` option has been removed." msgid "The deprecated `pollster-list` option has been removed." msgstr "The deprecated `pollster-list` option has been removed." msgid "" "The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " "instead." msgstr "" "The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " "instead." msgid "The deprecated control exchange options have been removed." msgstr "The deprecated control exchange options have been removed." msgid "The deprecated file dispatcher has been removed." msgstr "The deprecated file dispatcher has been removed." msgid "The deprecated http dispatcher has been removed." msgstr "The deprecated http dispatcher has been removed." msgid "" "The deprecated kafka publisher has been removed, use NotifierPublisher " "instead." msgstr "" "The deprecated Kafka publisher has been removed, use NotifierPublisher " "instead." msgid "" "The deprecated support of configure polling in the `pipeline.yaml` file has " "been removed. Ceilometer now only uses the `polling.yaml` file for polling " "configuration." msgstr "" "The deprecated support of configure polling in the `pipeline.yaml` file has " "been removed. Ceilometer now only uses the `polling.yaml` file for polling " "configuration." msgid "" "The event database dispatcher is now deprecated. It has been moved to a new " "project, alongside the Ceilometer API for /v2/events, called Panko." msgstr "" "The event database dispatcher is now deprecated. It has been moved to a new " "project, alongside the Ceilometer API for /v2/events, called Panko." 
msgid "" "The notification-agent can now be configured to either build meters or " "events. By default, the notification agent will continue to load both " "pipelines and build both data models. To selectively enable a pipeline, " "configure the `pipelines` option under the `[notification]` section." msgstr "" "The notification-agent can now be configured to either build meters or " "events. By default, the notification agent will continue to load both " "pipelines and build both data models. To selectively enable a pipeline, " "configure the `pipelines` option under the `[notification]` section." msgid "" "The option 'glance_page_size' has been removed because it's not actually " "needed." msgstr "" "The option 'glance_page_size' has been removed because it's not actually " "needed." msgid "" "The options 'requeue_event_on_dispatcher_error' and " "'requeue_sample_on_dispatcher_error' have been enabled and removed." msgstr "" "The options 'requeue_event_on_dispatcher_error' and " "'requeue_sample_on_dispatcher_error' have been enabled and removed." msgid "" "The pipeline dynamic refresh code has been removed. Ceilometer relies on the " "cotyledon library for a few releases which provides reload functionality by " "sending the SIGHUP signal to the process. This achieves the same feature " "while making sure the reload is explicit once the file is correctly and " "entirely written to the disk, avoiding the failing load of half-written " "files." msgstr "" "The pipeline dynamic refresh code has been removed. Ceilometer relies on the " "cotyledon library for a few releases which provides reload functionality by " "sending the SIGHUP signal to the process. This achieves the same feature " "while making sure the reload is explicit once the file is correctly and " "entirely written to the disk, avoiding the failing load of half-written " "files." msgid "" "The previous configuration options default for " "'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " "allowed to lose data very easily: if the dispatcher failed to send data to " "the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " "were lost forever. This was completely unacceptable, and nobody should be " "able to configure Ceilometer in that way.\"" msgstr "" "The previous configuration options default for " "'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " "allowed to lose data very easily: if the dispatcher failed to send data to " "the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " "were lost forever. This was completely unacceptable, and nobody should be " "able to configure Ceilometer in that way.\"" msgid "" "The tenant (project) discovery code in the polling agent now scans for " "tenants in all available domains." msgstr "" "The tenant (project) discovery code in the polling agent now scans for " "tenants in all available domains." msgid "" "The transport_url defined in [oslo_messaging_notifications] was never used, " "which contradicts the oslo_messaging documentation. This is now fixed." msgstr "" "The transport_url defined in [oslo_messaging_notifications] was never used, " "which contradicts the oslo_messaging documentation. This is now fixed." msgid "" "To minimise load on Nova API, an additional configuration option was added " "to control discovery interval vs metric polling interval. 
If " "resource_update_interval option is configured in compute section, the " "compute agent will discover new instances based on defined interval. The " "agent will continue to poll the discovered instances at the interval defined " "by pipeline." msgstr "" "To minimise load on Nova API, an additional configuration option was added " "to control discovery interval vs metric polling interval. If " "resource_update_interval option is configured in compute section, the " "compute agent will discover new instances based on defined interval. The " "agent will continue to poll the discovered instances at the interval defined " "by pipeline." msgid "" "To utilize the new policy support. The policy.json file should be updated " "accordingly. The pre-existing policy.json file will continue to function as " "it does if policy changes are not required." msgstr "" "To utilize the new policy support. The policy.json file should be updated " "accordingly. The pre-existing policy.json file will continue to function as " "it does if policy changes are not required." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Usage of pipeline.yaml for polling configuration is now deprecated. The " "dedicated polling.yaml should be used instead." msgstr "" "Usage of pipeline.yaml for polling configuration is now deprecated. The " "dedicated polling.yaml should be used instead." msgid "" "Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." "*`" msgstr "" "Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." "*`" msgid "" "With collector service being deprecated, we now have to address the " "duplication between dispatchers and publishers. The file dispatcher is now " "marked as deprecated. Use the file publisher to push samples into a file." msgstr "" "With collector service being deprecated, we now have to address the " "duplication between dispatchers and publishers. The file dispatcher is now " "marked as deprecated. Use the file publisher to push samples into a file." msgid "" "Workload partitioning of notification agent is now split into queues based " "on pipeline type (sample, event, etc...) rather than per individual " "pipeline. This will save some memory usage specifically for pipeline " "definitions with many source/sink combinations." msgstr "" "Workload partitioning of notification agent is now split into queues based " "on pipeline type (sample, event, etc...) rather than per individual " "pipeline. This will save some memory usage specifically for pipeline " "definitions with many source/sink combinations." msgid "" "[`bug 1254800 `_] Add " "better support to catch race conditions when creating event_types" msgstr "" "[`bug 1254800 `_] Add " "better support to catch race conditions when creating event_types" msgid "" "[`bug 1388680 `_] " "Suppose ability to query for None value when using SQL backend." msgstr "" "[`bug 1388680 `_] " "Suppose ability to query for None value when using SQL backend." msgid "" "[`bug 1480333 `_] " "Support ability to configure collector to capture events or meters mutally " "exclusively, rather than capturing both always." msgstr "" "[`bug 1480333 `_] " "Support ability to configure collector to capture events or meters mutally " "exclusively, rather than capturing both always." msgid "" "[`bug 1491509 `_] Patch " "to unify timestamp in samples polled by pollsters. Set the time point " "polling starts as timestamp of samples, and drop timetamping in pollsters." 
msgstr "" "[`bug 1491509 `_] Patch " "to unify timestamp in samples polled by pollsters. Set the time point " "polling starts as timestamp of samples, and drop timestamping in pollsters." msgid "" "[`bug 1504495 `_] " "Configure ceilometer to handle policy.json rules when possible." msgstr "" "[`bug 1504495 `_] " "Configure Ceilometer to handle policy.json rules when possible." msgid "" "[`bug 1506738 `_] [`bug " "1509677 `_] Optimise SQL " "backend queries to minimise query load" msgstr "" "[`bug 1506738 `_] [`bug " "1509677 `_] Optimise SQL " "backend queries to minimise query load" msgid "" "[`bug 1506959 `_] Add " "support to query unique set of meter names rather than meters associated " "with each resource. The list is available by adding unique=True option to " "request." msgstr "" "[`bug 1506959 `_] Add " "support to query unique set of meter names rather than meters associated " "with each resource. The list is available by adding unique=True option to " "request." msgid "" "[`bug 1513731 `_] Add " "support for hardware cpu_util in snmp.yaml" msgstr "" "[`bug 1513731 `_] Add " "support for hardware cpu_util in snmp.yaml" msgid "" "[`bug 1518338 `_] Add " "support for storing SNMP metrics in Gnocchi.This functionality requires " "Gnocchi v2.1.0 to be installed." msgstr "" "[`bug 1518338 `_] Add " "support for storing SNMP metrics in Gnocchi.This functionality requires " "Gnocchi v2.1.0 to be installed." msgid "" "[`bug 1519767 `_] " "fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and " "its potential race conditions are now patched." msgstr "" "[`bug 1519767 `_] " "fnmatch functionality in python <= 2.7.9 is not thread-safe. this issue and " "its potential race conditions are now patched." msgid "" "[`bug 1523124 `_] Fix " "gnocchi dispatcher to support UDP collector" msgstr "" "[`bug 1523124 `_] Fix " "Gnocchi dispatcher to support UDP collector" msgid "" "[`bug 1526793 `_] " "Additional indices were added to better support querying of event data." msgstr "" "[`bug 1526793 `_] " "Additional indices were added to better support querying of event data." msgid "" "[`bug 1530793 `_] " "network.services.lb.incoming.bytes meter was previous set to incorrect type. " "It should be a gauge meter." msgstr "" "[`bug 1530793 `_] " "network.services.lb.incoming.bytes meter was previous set to incorrect type. " "It should be a gauge meter." msgid "" "[`bug 1531626 `_] Ensure " "aggregator transformer timeout is honoured if size is not provided." msgstr "" "[`bug 1531626 `_] Ensure " "aggregator transformer timeout is honoured if size is not provided." msgid "" "[`bug 1532661 `_] Fix " "statistics query failures due to large numbers stored in MongoDB. Data from " "MongoDB is returned as Int64 for big numbers when int and float types are " "expected. The data is cast to appropriate type to handle large data." msgstr "" "[`bug 1532661 `_] Fix " "statistics query failures due to large numbers stored in MongoDB. Data from " "MongoDB is returned as Int64 for big numbers when int and float types are " "expected. The data is cast to appropriate type to handle large data." msgid "" "[`bug 1533787 `_] Fix an " "issue where agents are not properly getting registered to group when " "multiple notification agents are deployed. This can result in bad " "transformation as the agents are not coordinated. It is still recommended to " "set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " "deploying multiple agents." 
msgstr "" "[`bug 1533787 `_] Fix an " "issue where agents are not properly getting registered to group when " "multiple notification agents are deployed. This can result in bad " "transformation as the agents are not coordinated. It is still recommended to " "set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " "deploying multiple agents." msgid "" "[`bug 1536338 `_] Patch " "was added to fix the broken floatingip pollster that polled data from nova " "api, but since the nova api filtered the data by tenant, ceilometer was not " "getting any data back. The fix changes the pollster to use the neutron api " "instead to get the floating ip info." msgstr "" "[`bug 1536338 `_] Patch " "was added to fix the broken floatingip pollster that polled data from Nova " "API, but since the Nova API filtered the data by tenant, Ceilometer was not " "getting any data back. The fix changes the pollster to use the Neutron API " "instead to get the floating IP info." msgid "" "[`bug 1536498 `_] Patch " "to fix duplicate meter definitions causing duplicate samples. If a duplicate " "is found, log a warning and skip the meter definition. Note that the first " "occurance of a meter will be used and any following duplicates will be " "skipped from processing." msgstr "" "[`bug 1536498 `_] Patch " "to fix duplicate meter definitions causing duplicate samples. If a duplicate " "is found, log a warning and skip the meter definition. Note that the first " "occurrence of a meter will be used and any following duplicates will be " "skipped from processing." msgid "" "[`bug 1536699 `_] Patch " "to fix volume field lookup in meter definition file. In case the field is " "missing in the definition, it raises a keyerror and aborts. Instead we " "should skip the missing field meter and continue with the rest of the " "definitions." msgstr "" "[`bug 1536699 `_] Patch " "to fix volume field lookup in meter definition file. In case the field is " "missing in the definition, it raises a key error and aborts. Instead we " "should skip the missing field meter and continue with the rest of the " "definitions." msgid "" "[`bug 1539163 `_] Add " "ability to define whether to use first or last timestamps when aggregating " "samples. This will allow more flexibility when chaining transformers." msgstr "" "[`bug 1539163 `_] Add " "ability to define whether to use first or last timestamps when aggregating " "samples. This will allow more flexibility when chaining transformers." msgid "" "[`bug 1542189 `_] Handle " "malformed resource definitions in gnocchi_resources.yaml gracefully. " "Currently we raise an exception once we hit a bad resource and skip the " "rest. Instead the patch skips the bad resource and proceeds with rest of the " "definitions." msgstr "" "[`bug 1542189 `_] Handle " "malformed resource definitions in gnocchi_resources.yaml gracefully. " "Currently we raise an exception once we hit a bad resource and skip the " "rest. Instead the patch skips the bad resource and proceeds with rest of the " "definitions." msgid "" "[`bug 1550436 `_] Cache " "json parsers when building parsing logic to handle event and meter " "definitions. This will improve agent startup and setup time." msgstr "" "[`bug 1550436 `_] Cache " "json parsers when building parsing logic to handle event and meter " "definitions. This will improve agent startup and setup time." msgid "" "[`bug 1578128 `_] Add a " "tool that allow users to drop the legacy alarm and alarm_history tables." 
msgstr "" "[`bug 1578128 `_] Add a " "tool that allow users to drop the legacy alarm and alarm_history tables." msgid "" "[`bug 1597618 `_] Add " "the full support of snmp v3 user security model." msgstr "" "[`bug 1597618 `_] Add " "the full support of SNMP v3 user security model." msgid "" "[`bug 255569 `_] Fix " "caching support in Gnocchi dispatcher. Added better locking support to " "enable smoother cache access." msgstr "" "[`bug 255569 `_] Fix " "caching support in Gnocchi dispatcher. Added better locking support to " "enable smoother cache access." msgid "" "audit middleware in keystonemiddleware library should be used for similar " "support." msgstr "" "audit middleware in keystonemiddleware library should be used for similar " "support." msgid "" "batch_size and batch_timeout configuration options are added to both " "[notification] and [collector] sections of configuration. The batch_size " "controls the number of messages to grab before processing. Similarly, the " "batch_timeout defines the wait time before processing." msgstr "" "batch_size and batch_timeout configuration options are added to both " "[notification] and [collector] sections of configuration. The batch_size " "controls the number of messages to grab before processing. Similarly, the " "batch_timeout defines the wait time before processing." msgid "" "disk.* aggregated metrics for instance are deprecated, in favor of the per " "disk metrics (disk.device.*). Now, it's up to the backend to provide such " "aggregation feature. Gnocchi already provides this." msgstr "" "disk.* aggregated metrics for instance are deprecated, in favour of the per " "disk metrics (disk.device.*). Now, it's up to the backend to provide such an " "aggregation feature. Gnocchi already provides this." msgid "gnocchi_resources.yaml in Ceilometer should be updated." msgstr "gnocchi_resources.yaml in Ceilometer should be updated." msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgstr "" "gnocchiclient library is now a requirement if using ceilometer+gnocchi." ceilometer-10.0.0/releasenotes/source/index.rst0000666000175100017510000000025313236733243021606 0ustar zuulzuul00000000000000========================= Ceilometer Release Notes ========================= .. toctree:: :maxdepth: 1 unreleased pike ocata newton mitaka liberty ceilometer-10.0.0/releasenotes/source/liberty.rst0000666000175100017510000000022013236733243022143 0ustar zuulzuul00000000000000============================= Liberty Series Release Notes ============================= .. 
release-notes:: :branch: origin/stable/liberty ceilometer-10.0.0/ChangeLog0000664000175100017510000046206613236733435015545 0ustar zuulzuul00000000000000CHANGES ======= 10.0.0 ------ * Imported Translations from Zanata * add hardware.disk.read|write.\* stats * add volume.provider.\* meters to docs * add note explaining gnocchi coordination\_url * Deprecate aggregated disk.\* metrics on instance * utils: move hash\_of\_set where it's solely used * utils: move kill\_listeners to ceilometer.notification * Imported Translations from Zanata * Zuul: Remove project name * utils: move publisher-only utils functions in publisher * remove repeated host * Imported Translations from Zanata * Replace curly quotes with straight quotes * Delete not applicable definition * add volume provider resource types to gnocchi * support cinder capacity statistics * Remove use of unsupported TEMPEST\_SERVICES variable * Imported Translations from Zanata * Remove state\_description for trove * polling: iter randomly over sources and pollsters when polling * polling: simplify manager method * Remove shuffle\_time\_before\_polling\_task option * utils: remove unused decimal/dt conversion functions * devstack: fix gnocchi database setup * gate: move tripleo job to experimental * Remove bundled intree ceileometer tempest plugin * tempest: reuse zuul conf of the plugin * tempest: use new plugin * Remove the redundant conf.py file * ignore compute.instance.create.start for metrics * tempest: remove ceilometer-api alarm * Remove extra space between method parameters * change doc ceilometer bug tracker url * remove gnocchi ignore option * simplify gnocchi batch push setup * compute sample as dictionary once * fix gnocchi stats logging * use hashmap to quickly find matching resource def * cleanup measurements page * Add README.rst record more project message * Imported Translations from Zanata * Check required Gnocchi version is installed * Replace ujson with json * Fix maximum recursion depth exceeded bug when property referencing itself * Run all telemetry tests in integration jobs * update best practices * cleanup data-pipelines admin guide * cleanup data collection admin guide * cleanup admin-guide architecture * reorder admin and contributor topics * remove configuration from contributor guide * remove install section from contributor guide * partial clean up of contributor install info * cleanup contributor plugins details * Do not check iterable objects before for loop * Do not check keystone domains length in TenantDiscovery * Add doc8 to pep8 check for ceilometer project * Imported Translations from Zanata * Remove filter\_service\_activity option in doc * Minor update comment of devstack settings * Add user\_id for sample volume.snapshot.size * Remove useless trailing newlines * Add missing name traits * devstack: redis on opensuse needs to have default config * ensure pipeline\_listener set up * remove sample sorting * Update http publisher options in doc * clean up non-install parts of contributor docs * Update NotifierPublisher class doc * Fix incorrect yaml code block in pipeline definition * [doc] frequency of polling should be controlled via the polling configuration * remove kafka publisher * Fix typo in utils.py * split partitioning polling tests * drop base polling test separation * static resources not picked up * Move delayed out of utils * doc: remove useless Ceilometer service creation * Move utils.dict\_to\_keyval to opendaylight * minor update to gnocchi contributor docs * ignore api docs * cleanup 
collector references * Imported Translations from Zanata * remove ceilometerclient * Imported Translations from Zanata * Imported Translations from Zanata * remove unused pollster exception resources * simplify test data generation * remove duplicate polling.yaml setup * remove test\_manager\_exception\_persistency * minor polling cleaning * Add cpu\_l3\_cache to polling yaml * simplify cache generation * Put configurations to appropriate part * polling: run polling tasks immediately on start * Set shuffle\_time\_before\_polling\_task to float and set a minimum * fix ceilometer-compute invoke libvirt exception error * Imported Translations from Zanata * always declare partitioning variables * Remove setting of version/release from releasenotes * treat warnings as errors in release notes build * place release notes inline * zuul: run TripleO jobs with new zuulv3 layout * Remove unused ceilometer.utils.update\_nested * Remove unused ceilometer.utils.EPOCH\_TIME * Remove unused ceilometer.utils.sanitize\_timestamp * utils: remove unused create\_periodic function * queues per manager rather than per pipeline * pluggable notification agent * remove redundant filter check * move pipeline out of \_\_init\_\_ * separate polling code * nearly pluggable notification agent * separate base manager from pipeline * set models as part of pipeline manager class * remove agent setup\_\* helper functions * move sample/event specifc pipeline models to own module * Change oslo.messaging prefetch default * Imported Translations from Zanata * stop double refreshing on start * update install docs * move listener targets to notification agent * rename sample handler * common notification endpoint * refresh agent if group membership changes * Revert "devstack: Set an optimal processing queue" * libvirt: share disk device listing * revise doc of "workload\_partitioning" and term "Notification Agents" * Make Doc links point to the latest branch * Fix bug for ceilometer polling generates an exception * Imported Translations from Zanata * Clarify that there is only one polling agent * Remove direct publisher since it is deprecated * Fix Format in contributor/plugins.rst * set ceilometer\_backend * remove kafka and keystonemiddleware reqs * Zuul: add file extension to playbook path * Clean up direct publisher * Replace jsonutils by ujson * Remove deprecated storage drivers * remove branch specific references * Move binary and notification tests in unit * Remove Ceilometer API * Imported Translations from Zanata * Rename magnum events * fix radosgw meter name * Remove the wrap for skip inspect rbd disk info * tests: fix Redis key length debugging output format * snmp: warn if snmp call timeout * Move oslo.cache to gnocchi flavor * Use generic user for both zuul v2 and v3 * fix gnocchi publisher * zuul: remove ElasticSearch tempest experimental job * Migrate to Zuul v3 * handle new tempest swift interface * Remove unused and unmaintained doc Makefile * Update tests to do not use deprecated test.services() * kill collector * Imported Translations from Zanata * pipeline: remove polling fallback support * event: move models out of storage * Replace the Gnocchi dispatcher by a publisher * Add Hanxi Liu as a Ceilometer maintainer * Fix a typo in the Installation Guide * Use tempest.clients.Manager as base clients class * capture aodh events * Move object storage container\_client to match tempest * agent: log pollster skipping to debug * Cleanup test-requirements * messaging-publisher: fix threadsafe of flush() * devstack: 
install ceilometer extra * remove unused ExchangeTopics * remove ironic exchange * Replace the usage of some aliases in tempest * Remove deprecated pollster-list option * re-add cpu\_util metric to measurement list * Remove class KafkaBrokerPublisher * Set default ceilometer storage backend to gnocchi * Add dipatcher correlated renos * Remove deprecated nova\_http\_log\_debug option * Remove deprecated compute.workload\_partitioning * Initialize hashrings as AgentManager object * Removes use of timeutils.set\_time\_override * Remove class HttpDispatcher * Add missing command of adding admin role to gnocchi * Remove class FileDispatcher * Fix wrong links in ceilometer * Remove class PublisherBase * Retry to upgrade Gnocchi if connection fails * Use gnocchiclient 4.0 exception types * Update description 'resource\_update\_interval' option * support new gnocchiclient interface * fix disk total\_time metrics * Remove deprecated \*\_control\_exchange * Imported Translations from Zanata * Update reno for stable/pike 9.0.0 ----- * update measurements * vmware:The cpu\_util value should be a decimal * keystone: pass region name to keystone client * Add disk total duration of reads/writes metric * Modify memory swap metric type * Fix a typo in ceilometer * Remove install-guide env which is not effective * fix cpu\_util precision is too precise * ensure timezone is included in timestamps * snmp: Fix discovery when total memory is missing * Fix share.size meter * vmware: ignore -1 values * stop converting to old message format * deprecated pollster-list * use gnocchi devstack script * [Trivialfix]Fix typos in ceilometer * Update and replace http with https for doc links in ceilometer * Deprecate kafka publisher * wrong document location * Update the documentation link for doc migration * Fix neutron lbaas v2 when no listeners are set * add configuration folder * Fix inspect\_vnics error for libvirt * generate conf on doc build * turn on warning-is-error for doc builds * add missing sphinx extension * High precision rate of change timedelta * Specify region when looking for radosgw admin URL * doc: move install guide to main doc dir * remove templates * doc: move old dev docs to contributor section * doc: initial index cleanup * Project\_id for SDN controller meters * Gnocchi support for SDN controller meters * Ceilometer meter support to collect network statistics * Fix some initializations * Docs: update the path of "meters.yaml" and its new feature * Docs: switch to openstackdocstheme * Remove the explanation of nonexistent parameter * Update URL home-page in documents according to document migration * Add memory swap metric * Move "test\_versions.py" to the directory of functional test * Remove interval reference to pipeline * api: remove unused code * tempest: Use primary user token * Update Documentation link in README * Add some unit test cases * Limit the default numbers of metrics polled * Don't check libvirt exception if libvirt is absent * Revert "deprecate archive policy of Gnocchi dispatcher" * Fixed ceilometer arithmetic transformer bug * explicitly note sanitize\_timestamp return condition * Remove old maintainers * Add a description of the parameters to function * Migrate telemetry-events * Migrate telemetry-best-practices * Migrate telemetry-data-retrieval * Migrate telemetry-data-collection * Migrate telemetry-data-pipelines * Migrate telemetry troubleshooting guide * migrate telemetry measurements * Fix typo for additional device support in intel node manger * Add some unit test 
cases * Add event definition for manila * Migrate telemetry system architecture * Fix UnicodeEncodeError error when log messge in poll\_and\_notify * use tempest manager * Fix some reST field lists in docstrings * expect oslo.messaging messages * Fix can not find VMwareAPISession when import oslo\_vmware * post\_test\_hook: stop sourcing gnocchi/devstack/settings * cap cpu\_util * ensure resources are hashable * Doc: guide for operating resource type * Add share create/delete/expand/shrink meters * fix service path for fedora * modify the description for GnocchiDispatcher * add instruction to set auth\_mode * Update meters definition reference * Added Nova Compute CPU metrics to gnocchi\_resources.yaml * devstack: install Gnocchi using pip * This remove annoying message repetition in logs * Fix html\_last\_updated\_fmt for Python3 * update link away from google docs * Replace assertRaisesRegexp with assertRaisesRegex * Trivial fix typos * change to reference gnocchi.xyz * Change default polling interval * Fix typo in docstring * XenAPI: use os-xenapi for XenAPI driver * Support loading multiple meter definition files * tempest: fix some future flake8 issue * tempest: remove deprecation warning * Deprecate olsotest.mockpatch in favor of native fixtures * Fix a typo * tempest: Allow to configure granularity * integration: always run all commands * Remove upper constraint on sqlalchemy * Adds a Zaqar publisher * tempest: tell tempest our scenario are long * integration: Fix report generation * tests: fix messaging driver * minor doc updates * Replace Ceilometer coordination layer by tooz partition system * Fix publisher doc link * gnocchi/notification: allow to configure the maximum number of requests in parallel * tempest: Don't hardcode the network name * Remove powervm from inspector doc * Cleanup post\_test\_hook.sh * tempest: use the configured flavor * tests: remove oslo.config fixture usage * tempest: rework gabbi setup * Remove log translations * Change the compute polling local cache to be mutex * Optimize the link address * Format the abstraction layer of inspector methods to avoid TypeError * fix install-guide gnocchi+keystone instruction * Correct the configuration of pipeline\_processing\_queues in devstack * compute: remove default duration value * compute disks: use the generic compute pollster * compute vnics: use the generic compute pollster * Add tool for migrating metric data from ceilometer's storage to gnocchi * Use vcpu.x.time and vcpu.x.wait values in libvirt inspector * tests: fix conf object creation * Adds 'disabled' to the possible states for a member in LBaaSv2 * deprecated support of pipeline.yaml for polling * Correct bad use response\_strings in live.yaml * Use HostAddressOpt for opts that accept IP and hostnames * deprecate archive policy of Gnocchi dispatcher * tempest: Fix exception handling * [install-guide] Add more links * tempest: remove confusing stack check step * fix gnocchi unprocessed measures debug * compute: Remove dead code * compute: create one pollster to rule them all * Bump gnocchiclient min version * Remove second tenacity in requirements * tests: stop hammering CPU while waiting for sample to wait * coordination: remove started check * coordination: remove group\_id check * coordination: stop checking for \_coordinator to be None * coordination: create coordinator at init time * coordination: make group\_id to never be None * Add sem-ver flag so pbr generates correct version * tests: simplify broken test * libvirt: rewrite the error handling 
* Fix the incorrect gnocchi command * fix blacklisting NovaLike resources * tests: remove unused mocked method * remove resource if not created * fix gnocchi\_resources mapping * cleanup unused devstack code * start notification agent after restarting apache * Swallow & report 404s from Swift (missing tenant) * Remove Rohit Jaiswal from maintainers * devstack: Set an optimal processing queue * tempest: remove broken tests * remove ceilometer-collector condition when configuring storage * make gnocchi independent of ceilometer-api * Switch to use stable data\_utils * make gnocchi posting more resilient * remove tooz safety catch * Bugfix: use transport\_url from [oslo\_messaging\_notifications] if present * Bump kafka-python and oslo.messaging * Make sure to get all tenants by scanning each available domain * add jitter to notification agent * Modify variable's usage in Log Messages * agent: only create partition coordinator if backend url provided * agent: start coordinator at run() and never stops * coordination: use tooz builtin heartbeat manager * coordination: use join\_group\_create() * coordination: simplify retry condition by using tenacity.TryAgain * coordination: stop tracking joined groups * coordination: use a conf object with registered options in tests * Remove pipeline periodic refresh feature * Switch to use stable data\_utils * Enable Basic and https certificate authentication for http publisher * tempest: skip legacy telemetry-api tests * Use more specific asserts in tests * Load pipeline config files from /etc/ceilometer/ firstly * Don't run ceilometer-upgrade on unconfigured db * use tooz hashring * Remove unused variable * prepare future tempest breakage * include gnocchi+keystone instructions in install guide * add configuration instructions from admin guide and dev docs * upgrade gnocchi via ceilometer * switch dispatcher references to publisher * remove legacy db stuff * gnocchi: remove archive policy setting for identity * Use bytes for coordination member * Using Panko as publisher than dispatcher if enabled * remove keystone\_authtoken from polling agent * support gnocchi timeout * Remove smoke tag for TelemetryNotificationAPITest * add missing instance meta * Trivial: remove extra spaces * Support extended declaring exchanges * Remove unused override * remove collector instructions * chill out on the number of items in toc * Revert "verify gnocchi connection before processing" * Fix reno title format * doc: update pipeline link * Ship YAML file to /usr/share * Deprecate event\_dispatchers and meter\_dispatchers options * Remove useless metric name * set OS\_AUTH\_TYPE in gate * Support i18n for api app * Correct the use of marker function * match generic cirros name * nova: track flavor name * Trivial-fix: use domain\_id instead of domain\_name * fix [service\_credentials] section location * fix gnocchi url links * drop kwapi pollster * Correct the doc link * remove PaaS event format * cleanup devstack cache initialisation * add note about batching+gnocchi * Switch to use test\_utils.call\_until\_true * gabbi: use history * Deprecate collector * Remove support for py34 * Use https instead of http for git.openstack.org * stop hardcode timeout in tempest tests * Update reno for stable/ocata 8.0.0 ----- * add polling.yaml docs * Do not use non-UUID resource ID in Aodh+Gnocchi tests * Use Tempest stable library interfaces * polling definition file * remove endpoint\_override * gnocchi: do not use removed encode\_resource\_id * update multi-publisher image * make 
connection pool configurable * make http publisher equivalent to dispatcher * add ceilometer-upgrade step to install guide * update verify to use gnocchi * drop api and storage references from install-guide * Switch to decorators.idempotent\_id * modernise gabbi tests * drop notes re mod\_wsgi * move and trim legacy db docs * show panko events for debug * devstack: make sure it's possible to deploy panko only * set project\_id for cinder polling * install-doc: Disable ceilometer-api service * check panko during integration test * set panko dispatcher if enabled * tripleo: Fix logging of tripleo discovery * agent: always print the extension name on failure * Fix same type of simple message error * Don't load many times the same publisher * pipeline: remove tests helper from runtime code * Trivial: add white space of error message * Add support of refereshing the resource info in local cache * Simplify code of test\_complex\_query * Trivial: remove white space of exception message * switch instance\_discovery to libvirt\_metadata * publisher: fix pipeline confusing reference * remove test\_hbase\_table\_utils * Use parameter skip\_op\_id in compute.virt.vmware.test\_vsphere\_operations * gnocchi: replace / by \_ in resource\_id * gnocchi: don't rely on gnocchi uuid5 * gnocchi: prepare removal of gnocchi uuid5 * gnocchi: move volume\_type to correct section * tempest: use tempest img * fix the gnocchi resource type upgrade * remove residual instance pollster * use domainListGetStats to get cputime * drop instance meter * Fix oslo.vmware change that added new keyword argument * Don't poll nova with compute agent * tempest: Allow to not run deprecated API tests * Remove events storage and API * Update custom install doc * correct volumes name source * add volume.backup.size to gnocchi * upgrade: fix gnocchi resource update * stop assuming ceph/swift share same endpoint * devstack: check for ceilometer+panko service * tempest: allow usage of scenario-img for autoscaling * Fix the gate failure because of several issues * Trival-Fix: replace "nova" with "ceilometer" in api-ref * Fix error module usage * add support to parse user metadata * [doc] Note lack of constraints is a choice * always create ceilometer user * glossary: remove collector and add publisher * Test suite for Aodh's gnocchi-resource-threshold alarm * simplify fail gnochi+ceilometer check * Fix publisher comment * integration: run gabbi first * integration: deleting the stack sometimes timeout * tempest: support keystone v3 for autoscaling * tests: generate reports when tempest fail * Make sure gnocchi is enabled before ceilometer * Added new instance metrics to gnocchi definition * Revert "Add hypervisor inspector sanity check" * gnocchi: Allow to set a different creds section * Allow to configure the triplo network name * Fix oslo.vmware lazy loading * test: remove unused variable * utils: remove unused function stringify\_timestamps * gnocchi: Add volume\_type attribute to volume * devstack: Don't enable api by default * add libxml2 required for lxml * fix http publisher test * filtered out the phynical nics when query vm nics with VMware API * Correct ceilometer reraising of exception * Fix typo in plugin.sh * Add old vm state for compute.instance.update * capture keystone authentication as metric * tools: stop using global conf * gnocchi: use batch create\_metrics=True * Fix the install guide bug * Update Ceilometer architecture docs * Remove useless mock * drop \`counters\` support from pipeline * cleanup unit test location * 
update architecture docs * Remove api index file * Bump minimal version of cotyledon * Make rabbitmq configuration much simpler * Reenable the tempest test about nova notification * use hyphen-less option name sql\_expire\_sample\_only * Translate info-level log messages for LOG.error * drop disable\_non\_metric\_meters option * cleanup gnocchiclient * Change gnocchi\_ext.NotFound to gnocchi\_ext.ResourceTypeNotFound * integration tests: fix generation of tests\_results.html * Use one log level * Add volume and floatingip event\_delete for gnocchi event\_dispatcher * more gnocchi mapping cleanup * cleanup gnocchi mapping * cleanup manual install * fix postgresql functional gate * Add a release note for HTTPProxyToWSGI middleware * Modify unit of disk requests rate * Handling KeyError error, make gnocchi event dispatcher work * Corrected debug msg in CPUL3CachePollster * doc cleanup * use aodhclient in integration test * Print ceilometer configuration on startup * register ceilometer options at runtime * utils: stop using global conf * intel\_node\_manager: stop using global conf * Remove useless singleton check * register oslo\_db options at runtime * register keystoneauth options at runtime * storage: stop using global conf * cmd.storage: stop using global conf * test\_storage\_scenarios: stop using global conf * raise InstanceShutOffException if vm power off when get\_vm\_moid use map of vm manage obj to reduce vmware api calls * Remove duplicated code * Remove useless code * drop image pollster * gnocchi: remove useless keepalive adapter * comment: remove unused comment * XenAPI: Not support two key network meters * Replace retrying with tenacity * Clean the glance v1 code in tempest plugin * Convert file publisher to be equivalent to file dispatcher * Add aliases for direct publisher * doc: fix the wsgi configuration file name * deprecate ceilometer api * make blacklist log more specific * extract 'instance\_id' as 'resource\_id' in event definitions * disable signing in direct publisher * Add pyOpenSSL to test-requirements * Trivial fix: fix a wrong config option type usage * Add support of metering volume related resources * coordination: remove unused kwarg * Fix bug for ceilometer polling generates an error * fix perf when libvirt is >=2.0.0 and <2.3.0 * Remove ceilometer tool show\_data.py since it is not usable * Bump hacking to 0.12 * Replace oslo\_utils.timeutils.isotime * inspectors: stop using global conf * meter.notifications: stop using global conf * collector: stop using global conf * pollsters: stop using global conf * Replace SimpleProducer with KafkaProducer * document: remove configuration README description * devstack: allow to configure not backend * Modify variable's using method in Log Messages * compute.util: stop using global conf * event.converter: stop using global conf * discovery: stop using global conf * event: stop using global conf * sample: stop using global conf * keystone\_client: stop using global conf * publisher: stop using global conf * Use method constant\_time\_compare from oslo.utils * Add more verbosity for gnocchi\_resources.yaml * Add package "pifpaf" license description * Change redirect status from 307 to 308 * Use set\_defaults method in oslo\_middleware to check CORS\_OPTS * change gnocchi cache enable condition * Correct wrong description of method enforce * Fix gate problem related to \_error\_checker() * Remove deprecated ceilometer-dbsync * gnocchi: don't show backtrace on connection failure * fix gate * Redact password from 
opendaylight client logging * Broken Link: setuptools entry point * enable caching * Replace 'vmware' with 'vsphere' * gnocchi: stronger configuration file parsing * Don't create useless threads * pipeline: stop using global conf * declarative: stop using global conf * coordination: stop using global conf * Remove buggy usage of global config * pipeline services: stop using global conf * Fix collector single worker to handle UDP package with many worker config * Add http\_proxy\_to\_wsgi to config-generator * Fixed cotyledon version requirement * Remove pecan\_debug option * Fix the issue that missing the app file * neutron\_client: stop using global conf * nova\_client: stop using global conf * messaging: stop using global config * collector: stop using global config * api: Remove global conf * tests: fix broken udp tests * tests: fix tests for functional tests without dsvm * fix perf when libvirt is >=2.0.0 and <2.3.0 * Add http\_proxy\_to\_wsgi to api-paste * make gnocchi event dispatcher work * Enable release notes translation * collector: do not set any dispatcher by default * Remove the unnecessary space * Add autoscaling scenario in tempest tests * Compute agent can poll tx and rx errors and drops * Batching of event & meter dispatching over HTTP * Stop adding ServiceAvailable group option * specific the title * Trivial - Changes rst markup * Alters rst markup to comply with OpenStack rst guidelines * Add prefix "$" for command examples * Use 'code-block' for pieces of code * Docstrings should not start with a space * collector: fix graceful shutdown when udp enabled * Updates rst markup to better align with OpenStack rst guidelines * Modify startup parameters of ceilometer-api in devstack script * Fix the "Gnocchi" link pages of custom.rst doc * Bad Link: stevedore * Clean the deprecated non-metric related code * Clean imports in code * Using assertIsNone() instead of assertIs(None) * Using assertIsNone() instead of assertIs(None) * Fix typo in a docstring in agent/manager.py * XenAPI: polling meters are always 0 * Fix UnicodeEncodeError in Ceilometer polling * gabbi: set panko\_is\_disabled to avoid relying on Keystone autodiscovery * tox: refactor targets * Fix problem when using wsgi script to start * Remove import objects and use import modules * Refactor Ceilometer resource API * Move oslo.db to hard requirements list * Remove left over from old ceilometer-api binary * gabbi: import pipeline\_cfg\_file option before using it * Update reno for stable/newton 7.0.0.0rc1 ---------- * agentbase: remove flaky test * add note regarding pipeline\_processing\_queues option * Refactor Ceilometer event API * Refactor Ceilometer alarm API * standardize release note page ordering * gnocchi: enable event\_dispatcher in devstack and doc * Use pbr wsgi\_scripts feature to build ceilometer-api * Change fnmatch.match method to fnmatch.fnmatch * tox: Remove useless env * Use deps extra for optional requirements * Don't require gnocchiclient * Initialize correctly collector * update docs to show Telemetry projects * Remove store\_events options * Remove sql-expire-samples-only as a CLI option * Update the compute node service\_credentials parameters * Corrected file mode settings * Add oslo.config cli opt to skip the confirm check * add new meters about some perf events * Set a correct number of threads for polling tasks * improve notification processing * Fix string interpolation in log * correct input params in get\_samples * refactor service to be less pipeline dependent * [api-ref] Correct 
response code * [api-ref] Remove temporary block in conf.py * XenAPI: correct polling on memory\_usage * gnocchi: Create resource-types on upgrades * Allow to skip metering/event database upgrade * Deprecate Kwapi pollsters * Rename ceilometer-dbsync in ceilometer-upgrade * make reload test more resilient 7.0.0.0b3 --------- * generalise instable API redirect assertion * Nit: Aligning the content * Config logABug feature for Ceilometer api-ref * Fix checking IP version when using IPv6 * remove needless brackets * The debug that network.incoming/outgoing.packets print is not right * Update readme file * [install] Create endpoint in one command * [install] Add a missing stash * Replace urllib.quote() with six.moves.urllib.parse.quote() * correct the meaning of direct publish transport * correct the mistake in install-guide document * Add a publish transport in Ceilometer architecture document * XenAPI: failed to poll cpu\_util * Cleanup imports in code * add url in setup.cfg * conversions: remove no used local variable * Add api-ref/build to .gitignore * Fix a warning when running \`tox -e api-ref\` * Gnocchi dispatcher fails on skipped metric * Get ready for os-api-ref sphinx theme change * [dev-docs] Changed location of a class * Limit Happybase to < 1.0.0 * Fix tempest.conf generation * Configuration of certificate verification for HTTP dispatcher * api: redirect to Panko if enabled * fix the %{} when string formatting * ValueError exception when SNMP returns NoSuchObject * Format error message in http.py * HTTP Dispatcher: Post event data as JSON & improve logging * dispatcher: deprecate event database dispatcher for Panko * dispatcher/database: simplify connection retrieving * dispatcher: split the database dispatcher * Switch to use Glance v2 in image pollsters * Fix the not found link for notification in glossary.rst * Added a link to the architecture guide * Removed redundant phrase to increase readability * Fixed small grammar error in overview.rst * Set the correct for image bytes\_sent event * Change keystoneclient to keystoneauth * tests: rewrite batching test in a less racy way * Remove an unused method of network pollsters' base class * Fixed a small grammar error configuration help doc * add memory bandwidth meter * Do not limit elasticsearch to 2.0 * tests: remove dead code with self.source\_resources * Remove Nadya Privalova from core reviewers * Fix wrong parameter reference in periodic decorator * Add missing %s in print message * test: Fix wrong override value of config option interface * Modify py3.4 to py3.5 in the py35-functional of tox * Put py34 first in the env order of tox * This adds migrated API reference files * consumes error notif. 
when event are disabled * publisher: make direct publisher generic * Revert "[install] Create endpoint in one command" * Register the gnocchi event dispatcher * Reduce code duplication * Specify host info when using the notifier publisher * Add Python 3.4 and 3.5 classifiers and targets * Retrieval of RBD device information issue * Install configuration files in etc * Added full support of snmp v3 usm model 7.0.0.0b2 --------- * base.Resource not define \_\_ne\_\_() built-in function * Check lbaas version if call is v2 specific * move out oslo.service * Remove unused AdvEnum class definition * fix l3\_cache definition * throw PollsterPermenantError exception for memory usage meter * events: fix operator check in event filter * Raise PollsterPermanentError if inspector is not implemented * Remove unused LOG * Fix get\_gnocchiclient to work with python3.5 * collector: use an intermediate proxy class for event dispatcher * Replace raw\_input with input to make PY3 compatible * pollsters: Remove eventlet timers * Imported Translations from Zanata * Fix the name as it was a typo * devstack: prefix \_drop\_database * start partition coordinator only when there is a group to join * split discover into different namespaces * devstack: disable workload\_partition for compute polling * Add a tool to clean the legacy alarm tables * devstack: do not wait for service to start * Config: no need to set default=None * sqlalchemy: do not run upgrade on fresh install * sqlalchemy: fix JSONEncodedDict implementation type * Add install-guide for ceilometer * gnocchi: use events to end Gnocchi resource * Don't generate hostname of host running build to config file * Fixing a trivial typo * Add 'task\_state' attribute to meter metadata * add support of batch recording metering data for mongodb * Switch to use glanceclient to get image * Add install description for Neutron * add l3 cache usage meter * doc: remove left over of docbookrestapi * tests: do not override auth version to v2 6.1.0 ----- * dispacher/gnocchi: measures sent fix logging * Copy images\_client from tempest + Correct concurrency of gabbi 1.22.0 * Rename gabbits with \_ to have - instead * Correct concurrency of gabbi tests for gabbi 1.22.0 * Use "topics" instead of "topic" in Notifier initialization * Clean deprecated "rpc\_backend" in tests * Use trusts in Heat integration test * Remove configuration README in etc * Imported Translations from Zanata * Copy images\_client from tempest * Add Magnum events to definitions * [dispatcher/gnocchi] add unit for metric * Delete unused last\_dup variable * catch DriverLoadFailure for get\_transport optional * catch DriverLoadFailure for get\_transport optional * Bump to Nova v2.1 * Fix the py34 jenkins job * tempest: import ImagesClient as ImagesClientV2 * fix some typos in our doc, comments and releasenotes * enable swift pollsters poll data for specific region * enable swift pollsters poll data for specific region * Imported Translations from Zanata * tempest\_plugin: drop telemetry decorator * tempest\_plugin: drop telemetry decorator * Updated from global requirements * [Trivial] Update Neutron resource status list * doc: remove database alarm capability * replace deprecated heat command with OSC * Update to hacking 0.11.0 * Verify message's signature for every dispatcher * fix CI failure due to oslo.messaging 5.0.0 * remove log in tools/make\_test\_{event\_}data.py * fix CI failure due to oslo.messaging 5.0.0 * remove record\_metering\_data method from collector * tests: replace overtest by pifpaf 
* Add log hints for partition coordinator * fix opts.list\_opts for polling options * update help string for messaging\_urls * Drop timestamping in pollsters * Set the time point polling starts as timestamp of samples * Fix notification listeners usage * tox: only install hacking in pep8 target * Remove unused pylintrc * devstack: remove useless policy\_file setting * event: verify signature before recording events for all dispatchers * tests: stop services on tests teardown * Fix oslo\_service stop/start mechanism * remove floating\_ip\_get\_all in nova\_client * [Trivial] Refactor libvirt inspector connection & uri * Fix concurrency issue with snmp pollsters * Drop the executability of http.py * Updated from global requirements * remove deprecated auth type password-ceilometer-legacy * [Trivial] Update Neutron resource status list * [Trivial] Remove CEILOMETER\_API\_LOG\_DIR option for devstack * Update the default log levels * Clean some unused method in ceilometer/keystone\_client.py * remove invalid todo in storage functional test code * return 400 when invalid aggregation function is specified * Replace logging with oslo\_log * remove deprecated option database\_connection * move EventFilter to event storage namespace * remove MultipleResultsFound and NoResultFound exception * Remove useless file * remove todo for OS\_TEST\_PATH * add tempest to test-requirements.txt * Improve the docstring for Swift pollsters * add log decorator for neutron\_client public method * add debtcollector to requirements * Remove direct dependency on babel * Imported Translations from Zanata * Refactor floatingip pollster to use discovery * Fix notification listeners usage * notification: Remove eventlet timers * use static timestamps for api samples * refactor DefinitionException classes * collector: Don't use eventlet thread * fix openstack cli command in doc manual * Add release note link * switch to openstack cli instead of keystone cli * Updated from global requirements * libvirt: fix missing python-libvirt issue * Add status in Ceilometer VPN connection sample * document how to enable ceilometer stable branch in devstack * remove python-ceilometerclient from requirements * Imported Translations from Zanata * Updated from global requirements * Imported Translations from Zanata * Ignore the filter\_service\_activity option if gnocchi project not found * Fix Ceilometer tests config options * Updated from global requirements * Fix doc build if git is absent * Replace tempest-lib by os-testr * Add notes on moving to Gnocchi * delete verbose/redundant/deprecated text * replace fnmatch with oslo.utils.fnmatch * add ceilometer to gnocchi configuration notes * Updated from global requirements * Imported Translations from Zanata * remove complex capabilities for meter, resource and statistics * gnocchi: batch measurements * change keystone to openstack cli * re-org existing manually install notes * messaging: remove RequestContextSerializer * Remove unused context object in vpnaas test * Remove unused object from lbaas\_v2 test * Remove unused context object lbaas test * test: remove unused context object in FWaaS tests * Remove unused context objects in Glance tests * Remove unused context object in test * Remove a useless usage of oslo.context in meters API * Remove the deprecated DB2 driver * Update the Administrator Guide links * mongo: remove unused function * Updated from global requirements * Imported Translations from Zanata * drop magnetodb support * Simplify chained comparison * Enhancing Retry logic 
to Coordination when joining partitioning grp * publisher: clean out context usage * Disable ceilometer-aipmi by default for devstack * Remove useless context object usage * Imported Translations from Zanata * Imported Translations from Zanata 6.0.0 ----- * collector: never allow to lose data * 'ceilometer-polling' should fail with no valid pollsters * Imported Translations from Zanata * Fix typos in comments and config strings * Updated from global requirements 6.0.0.0rc2 ---------- * abort alarms URLs when Aodh is unavailable * abort alarms URLs when Aodh is unavailable * fix minor typo in test\_generic.py * Imported Translations from Zanata * Add the functional tests for getting events * collector: never allow to lose data * devstack Fix unprocess measure path * Imported Translations from Zanata * devstack: allow ceilometer-api and keystone to run on different hosts * Devstack: install coordination backend for compute agent * remove dns and trove from entry\_points * correct docstring in storage module * Imported Translations from Zanata * Remove gabbi tests that check content-location * Add http publisher * remove dns and trove from entry\_points * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/mitaka * Update .gitreview for stable/mitaka * Remove gabbi tests that check content-location 6.0.0.0rc1 ---------- * Imported Translations from Zanata * remove unused field 'triggers' defined in sample event\_pipeline.yaml * remove SERVICE\_TENANT\_NAME from devstack plugin * clean devstack plugin * add rc1 release notes * Use assertIn and assertNotIn * core status cleanup * tests: remove ceilometer-api bin test cases * gate: add missing sudo * change dns and trove notifications to declarative * Remove en\_GB translations * register the config generator default hook with the right name * Imported Translations from Zanata * Updated from global requirements * tempest: migrate api and scnario tests from tempest * mitaka-3 release notes * Adjust log levels for InstanceShutOffException * Fix event\_type creationg failure due to race condition * Imported Translations from Zanata * Ignoring cpu measurement when instance's state is SHUTOFF * Add validation for polling\_namespaces option * xenapi: support the session when xenserver is slave * Imported Translations from Zanata * gnocchi dispatch: Added new resource type support * remove wrong "#!/usr/bin/env python" header * Fixed corner cases of incorrect use of oslo.config * Updated from global requirements * timedelta plugin for meter definition process * Cast Int64 values to int, float in statistics * Cache getters for the decalarative definitions 6.0.0.0b3 --------- * [sahara] add events definitions regarding new notifications * Moved CORS middleware configuration into oslo-config-generator * Add the meter example file 'lbaas-v2-meter-definitions.yaml' * Change default policy to allow create\_samples * Enable the Load Balancer v2 events * Remove unused pngmath Sphinx extension * Updated from global requirements * Fix a minor missing parameter issue * close services in test * Add an update interval to compute discovery * Docs: Configure meters/events dispatch separately * Fix the typo in the gnocchiclient exception * Updated from global requirements * Add gnocchi dispatcher opts to config * Change the SERVICE\_TENANT\_NAME to SERVICE\_PROJECT\_NAME * Hyper-V: replaces in-tree hyper-v utils usage with os\_win * Initial seed of hacking * Add /usr/local/{sbin,bin} to rootwrap exec\_dirs * Fix SDR file 
parsing for Intel Node Manager * Gnocchi: fix ResourcesDefinitionException for py3 * Change LOG.warn to LOG.warning * tests: fix unworking debug output * Adds timestamp option to Aggregation transformer * remove default=None for config options * Replace assertEqual(None, \*) with assertIsNone in tests * Trivial: Cleanup unused conf variables * Enable the Load Balancer v2 for the Ceilometer(Part Two) * Remove unused variable * Enable the Load Balancer v2 for the Ceilometer(Part One) * Fix footnote reference to Aodh in docs * Updated from global requirements * Set None explicitly to filter options * KEYSTONE\_CATALOG\_BACKEND is deprecated * Use overtest to setup functional backends * devstack: Fix Keystone v3 configuration typo * Imported Translations from Zanata * Handle malformed resource definitions gracefully * Update the home page * Skip duplicate meter definitions * set higher batching requirement * use retrying to attempt to rejoin group * network: remove deprecated option name * sample: remove deprecated option name * Fix wrong capitalization * rewriting history * Remove unused pytz requirement * devstack: use password with version discovery * fix tempest path * Updated from global requirements * raise coordination error if not registered * do not configure worker specific items in init * integration-gate: fix publicURL retrieval * rolling upgrades * fix locking in ceilometer * enable notification agent partitioning * better support notification coordination * remove useless notification listener helper * Lookup meter definition fields correctly * Enhances get\_meters to return unique meters * Imported Translations from Zanata * Updated from global requirements * Fix ceilometer floatingip pollster * Updated from global requirements * tempest: migrate base class for tests * tempest: add ceilometer tempest plugin * tempest: add telemetry client manager * tempest: migrate conf.py from tempest tree * tempest: copy telemetry client from tempest tree * Fix events rbac 6.0.0.0b2 --------- * Don't store events with Gnocchi * add additional mitaka-2 release notes * Corrects typo "a other" -> "another" * Updated from global requirements * add release notes for mitaka-2 * devstack: add support for Gnocchi backend * notification: Use oslo.messaging batch listener * Cleanup of Translations * Added CORS support to Ceilometer * Don't set keystonemiddleware cache * Set None explicitly to filter options * Add OSprofiler-specific events definitions * collector: Use oslo.messaging batch listener * Updated from global requirements * Changes aggregator transformer to allow retention\_time w/o size * Replace LOG.warn with LOG.warning * Updated from global requirements * wrong accumulative value of "network.services.lb.incoming.bytes" * Trivial: Remove vim header from source files * Trival: Remove unused logging import * Fix the typos in the source code * gnocchi: fix stack resource type * Misspelling in message * Clean pagination related methods of impl\_mongodb * Fix some typos in the snmp.py * remove local hacking check * [MongoDB] add indexes in event collection * Remove unused code in gnocchi dispatcher * remove unnecessary code * recheck cache after acquired gnocchi\_resource\_lock * collector: remove deprecated RPC code * fix case in function name * Catch the EndpointNotFound in keystoneauth1 than in keystoneclient * Log exception if stevedore fails to load module * Updated from global requirements * Revert "Revert "devstack config for dogpile cache"" * add per resource lock * verify gnocchi 
connection before processing * [refactor] remove redundant import of options * Added unit test cases for pysnmp 4.3 * Add keystoneauth1 in requirements * gnocchi: fix cache hash logic * gnocchi: use gnocchiclient instead of requests * show queue status on integration test * Updated from global requirements * using a consistent uuid as cache namespace * Duplicate information link for writing agent plugins * Use keystoneauth1 instead of manual setup * Do not mock the memcache interface for auth\_token * oslo.messaging option group/name change for notification topics * Correct the host field of instance metadata * fix the bug that gnocchi dispatcher can't process single sample * Replace stackforge with openstack * MAINTAINERS: remove outdated data 6.0.0.0b1 --------- * Remove version from setup.cfg * add initial release notes * fix functional gate * messaging: stop using RequestContextSerializer * Fix ceilometer-test-event.py script * Deduplicate the code about snmp meter loading * Updated from global requirements * Revert "devstack config for dogpile cache" * Revert "Workaround requests/urllib connection leaks" * add cpu.delta to gnocchi resources * simplify collector cache * Consistent publisher\_id for polling agent * build metric list on init * re-implement thread safe fnmatch * clean up integration test urls * tools: fix default resource metadata for instance * don't pass ceilometer options to oslo.db engine facade * Use str(name) instead of name.prettyPrint() * Reduce code duplication * remove config files when run clean.sh * fix some test case wrongly skipped for mysql backend * Add WebTest to test-requirements.txt * tests: remove testscenario usage for storage drivers * Remove eventlet usage * Remove alarming code * Clarify the doc about multiple notification\_topics usage * Reduced source code by extracting duplicated code * devstack config for dogpile cache * Updated from global requirements * Updated from global requirements * Fix an indent nit of enforce\_limit method * Move the content of ReleaseNotes to README.rst * use common cache * A dogpile cache of gnocchi resources * Updated from global requirements * install database when collector is enabled * Updated from global requirements * Updated from global requirements * add reno for release notes management * Updated from global requirements * Support to get hardware's cpu\_util from snmp * add rohit\_ to MAINTAINERS * gnocchi: set the default archive policy to None * Mv gabbi\_pipeline.yaml into test directories * Factorize yaml loading of declarative stuffs * Factorize field definition of declarative code * Wrong result is returned when call events getting API * tox: use pretty\_tox in most places * Updated from global requirements * avoid unnecessary inner join in get\_resources() for SQL backend * Add sql-expire-samples-only to option list * Updated from global requirements * configure Apache only when ceilometer-api is enabled * Imported Translations from Zanata * avoid using isolation level * unquote resource id to support slash in it * specify runtime environment for scripts * Using oslo-config-generator to instead of generate-config-file.sh * Use gnocchiclient for integration script * Enable signature verification for events * Correct the timestamp type when make test samples data * Updated from global requirements * avoid generate temporary table when query samples * Reject posting sample with direct=true if Gnocchi is enabled * make script under tools directory executable * Updated from global requirements * Added the 
README.rst in devstack folder * fix tools/make\_test\_event\_data.py * fix image\_ref attr in gnocchi resource * support mysql+pymysql in functional test * Updated from global requirements * Fix snmp pollster to not ignore valid meters * Block oslo.messaging 2.6.1 release * reset policy per test * Remove dependency on sphinxcontrib-docbookrestapi * gnocchi: remove possible ending / in URL * api: simplify root controller * api: simplify Pecan config * remove instance:FLAVOR related code and docs * Do collector setup and storage cleanup for all backends * change collector\_workers to [collector]workers * Enable POST samples API when gnocchi enabled * devstack: fix debug info for Gnocchi * Imported Translations from Zanata * Add Liberty release note link * Fix make\_test\_data.sh * Imported Translations from Zanata * Be explicit when copying files to /etc/ceilometer * Deprecate event trait plugin 'split' * Updated from global requirements * Clean some log messages when polling neutron resources * Simplify the validation of required fields of pipeline source * doc: service enablement not necessary when using Devstack plugin * Skip bad meter definitions instead of erroring out * Remove the unused network\_get\_all method * mark logging.info translation accordingly * logging cleanup * Updated from global requirements * Remove last vestiges of devstack from grenade plugin * Add missing ceilometerclient repo location 5.0.0 ----- * Imported Translations from Zanata * Fix for resource polling warnings * SQL: Fix event-list with multiple trait query filters * Fix the bug of "Error spelling of a word" * Imported Translations from Zanata * SQL: Fix event-list with multiple trait query filters * Fix a mistake in a test * Configure collector to only record meter or event * Rename list\_events tests to list\_samples tests * fix elasticsearch script reference * Fix the deprecation note in meter.yaml * Fix the deprecation note in meter.yaml * Remove deprecated archive policy map for Gnocchi * Remove enable\_notification.sh * Parametrize table\_prefix\_separator in hbase * Imported Translations from Zanata * fix typo in storage/impl\_sqlalchemy * devstack: install all configuration files from etc/ * dispatcher: remove deprecated CADF code in HTTP * mongodb: remove deprecated replica\_set support * Ensure the test data sample has correct signature * Open Mitaka development 5.0.0.0rc1 ---------- * gnocchi: Don't raise NotImplementedError * Add missing meter and exchange opts * Imported Translations from Zanata * Add test to cover history rule change * Workaround requests/urllib connection leaks * integration tests: additional debugging infos * Coordinator handles ToozError when joining group * Don't create neutron client at loadtime * Delete its corresponding history data when deleting an alarm * update event filter test to validate multiple trait args * Fix variable typos * Updated from global requirements * Change ignore-errors to ignore\_errors * Fix reconnecting to libvirt * remove batch processing requirement from arithmetic transformer * Cleanup empty dirs from tests * retain existing listeners on refresh * Override dispatcher option for test\_alarm\_redirect\_keystone * [ceilometer] Update links to Cloud Admin Guide * Adds support for dynamic event pipeline * Updated from global requirements * Imported Translations from Zanata * pollster/api now publish to sample queue * tox: generate config file on test run * tox: Allow to pass some OS\_\* variables * Refactor keystone handling in discovery manager * 
Use make\_sample\_from\_instance for net-pollster * apply limit constraint on storage base interface * gnocchi: add two new resources * Fixed tox -egenconfig Error * Add declarative meters to developer docs * add delta transfomer support * do not recreate main queue listeners on partitioning * Validate required fields in meter definition * deprecate cadf\_only http dispatcher * Fix the heavy time cost of event-list * Update API Doc to deprecate the alarming part * Deprecate config options of the old alarming functionality * update architecture documentation * Add attribute 'state' to meter metadata when source is polling * doc: update devstack usage * Remove useless base class * Split out image non-meters * Make the gabbi tox target work with modern tox * Avoid 500 errors when duplicating limit queries * Correct test\_list\_meters\_meter\_id to work with py3 * Updated from global requirements * Update event\_definitions for Cinder Image Cache * Update install docs * Use b64encode to replace of encodestring * Prevent ceilometer expirer from causing deadlocks * remove duplicate log exception message * Spelling mistake of comment in api/controllers/v2/query.py * Fix typos in gnocchi.py and converter.py * Updated from global requirements * Updated from global requirements * Add a py34-functional tox target * doc: update notification\_driver * polling: remove deprecated agents * Fix string in limit warning * Typo fixing * missed entrypoint for nova\_notifier removal * Imported Translations from Transifex * Fix links in README.rst * integration: Add debugging information * deprecate db2 nosql driver * devstack: add new option to support event-alarm * Sync devstack plugin with devstack:lib/ceilometer * Updated from global requirements * remove old nova\_notifier processing code 5.0.0.0b3 --------- * restrict admin event access * Migrate the old snmp pollsters to new declarative pollster * Support to load pollsters extensions at runtime * Added snmp declarative hardware pollster * Requeuing event with workload\_partitioning on publish failure * Event filtering for non-admin users * integration: fix typo * gnocchi: cleanup instance resource definition * Updated from global requirements * Adding pradk to MAINTAINERS * Adding liusheng to MAINTAINERS * Add index to metadata\_hash column of resource table * Incorrect Links are updated * Removing unused dependency: discover * Use new location of subunit2html * Change tox default targets for quick use * Fixed identity trust event types * gnocchi: quote the resource\_id in url * fix metadata for compute cpu notifications * support custom metadata * Move profiler meters to yaml * Control Events RBAC from policy.json * Events RBAC needs scoped token * make telemetry sample payloads dictionaries * Fix requeue process on event handling error * allow configurable pipeline partitioning * Keep the instance\_type meta from polling and notification consistent * Add user\_id,project\_id traits to audit events * Change json path's to start with $. 
for consistency * Add validation tests for arithmetic, string and prefix expressions * Fix description for "Inapt spelling of 'MongoDB'" * Create conf directory during devstack install phase * support custom timestamp * Add cpu meters to yaml * Fix description for "Incorrect spelling of a word" * integration: add some new tests * Fix disable\_non\_metric\_meters referencing * Update tests to reflect WSME 0.8 fixes * remove jsonpath-rw requirement * Do not use system config file for test * gnocchi: move to jsonpath\_rw\_ext * Updated from global requirements * Allow to run debug tox job for functional tests * Use jsonpath\_rw\_ext for meter/event definitions * preload jsonpath\_rw parsers * integration test: adjusts timeout * integration test: failfast * Updated from global requirements * Avoid recording whole instance info in log * Fix dependency for doc build * Mark record\_type in PaaS Event Format doc as optional * full multi-meter support * add flexible grouping key * Corrected test\_fallback\_meter\_path test case * Add hypervisor inspector sanity check * handle list payloads in notifications * xenapi: support the session to "unix://local" * Introduce Guru Meditation Reports into Ceilometer * Use start status of coodinator in tooz * Fixed event requeuing/ack on publisher failure * Implement consuming metrics from Magnum * Avoid from storing samples with empty or not numerical volumes * use union all when building trait query * Fixed spelling error, retreive -> retrieve * Use min and max on IntOpt option types * Update install docs with gnocchi dispatcher info * Make it possible to run postgresql functional job * Revert "Remove version from os\_auth\_url in service\_credentials" * Updated from global requirements * Use oslo\_config PortOpt support * integration: chown ceilometer directory properly * add mandatory limit value to complex query list * add test to validate jsonpath * Remove version from os\_auth\_url in service\_credentials * do not translate debug logs * Updated from global requirements * Grenade plugin using devstack plugin for ceilometer * remove alembic requirement * Convert instance, bandwidth and SwiftMiddleware meters * Change and move the workers options to corresponding service section * Drop the downgrade function of migration scripts * start rpc deprecation * support multiple-meter payloads * add poll history to avoid duplicate samples * Add Kilo release note reference * initialise opencontrail client in tests * Make ConnectionRetryTest more reliable * Correct thread handling in TranslationHook * Updated from global requirements * Correctly intialized olso config fixture for TestClientHTTPBasicAuth * Don't start up mongodb for unit test coverage * disable non-metric meter definitions * Cast Int64 values to float * Convert identity, sahara and volume to meters yaml * Enable entry points for new declarative meters * Fix for rgw still throwing errors * group pollsters by interval * Revert "Revert "remove instance: meter"" * api: fix alarm deletion and update * Fixes the kafka publisher * Sync devstack plugin with devstack:lib/ceilometer * integration: use the right user in gate * Imported Translations from Transifex * Initial separating unit and functional tests * Stop using openstack.common from keystoneclient * minimise scope of hmac mocking * Updated from global requirements * gnocchi: retry with a new token on 401 * Fix some gabbi tests * Improve comments in notification.py * mongo: fix last python3 bugs * postgres isolation level produces inconsistent reads * 
Masks messaging\_urls in logs during debug mode * Corrected unit of snmp based harware disk and memory meters * Provide base method for inspect\_memory\_resident * Fix Python 3 issue in opendaylight client * Fix more tests on Python 3 * Remove the compute inspector choice restriction * [MongoDB] Refactor indexes for meter and resources * tests: add an integration test * Fix WSGI replacement\_start\_response() on Python 3 * gnocchi: reduce the number of patch to gnocchi API * Make the partition coordinator log more readable * Drop out-of-time-sequence rate of change samples 5.0.0.0b2 --------- * [MongoDB] Use a aggregate pipeline in statistics * Instance Cache in Node Discovery Pollster * Instance Caching * Imported Translations from Transifex * fix gnocchi resources yaml * Import the api opt group in gabbi fixture * Add a batch\_polled\_samples configuration item * Remove redundant comma * storage: deprecates mongodb\_replica\_set option * Improves send\_test\_data tools * Replace isotime() with utcnow() and isoformat() * distributed coordinated notifications * Imported Translations from Transifex * Close and dispose test database setup connections * Updated from global requirements * api: Redirect request to aodh if available * api: return 410 if only Gnocchi is enabled * Fix broken IPMI agent * add mandatory limit value to meter list * add mandatory limit value to resource list * add mandatory limit value to event list * Move gnocchi resources definition in yaml file * Send a notification per sample, do not batch * Handles dns.domain.exists event in Ceilometer * Pollsters now send notifications without doing transforms * Imported Translations from Transifex * Switch to the oslo\_utils.fileutils * Updated from global requirements * Use choices for hypervisor\_inspector option * The product name Vsphere should be vSphere * Add necessary executable permission * Store and restore the xtrace option in devstack plugin * gnocchi: Remove useless resources patching * add Trove(DBaaS) events * Set conf.gnocchi\_dispatcher.url explicitly in tests * Declarative meters support * Stop the tests if backend hasn't started * Delay the start of the collector until after apache restart * Clean the re-implemented serializers in Ceilometer * monkey\_patch thread in tests * make notifier default event publisher * Fix gnocchi DispatcherTest tests * Sort metric data before grouping and processing * Namespace functions in devstack plugin * Added valid values of operator to response body * gnocchi: fixes the instance flavor type * gnocchi dispatcher: fix typo in stevedore endpoint * Imported Translations from Transifex * Tolerate alarm actions set to None * Make ceilometer work correctly when hosted with a SCRIPT\_NAME * Implementation of dynamically reloadable pipeline * fix log msg typo in api utils * Updated from global requirements * Add documentation about the usage of api-no-pipline * drop deprecated pipeline * Improve doc strings after changing method for index creation * set default limit to meter/sample queries * collector: fix test raising error * Remove test-requirements-py3.txt * remove unused event query * Create a devstack plugin for ceilometer * Add support for posting samples to notification-agent via API * restore long uuid data type * Revert "Add support for posting samples to notification-agent via API" * Update alarm history only if change in alarm property * test error log - catch dummy error * fix kafka tests from flooding logs * catch warnings from error tests * remove unused notifier * Add 
support for posting samples to notification-agent via API * Stop dropping deprecated tables while upgrade in mongodb and db2 * Add handler of sample creation notification * Remove the unused get\_targets method of plugin base * Replaces methods deprecated in pymongo3.0 * add oslo.service options * Restricts pipeline to have unique source names * drop use of oslo.db private attribute * Fix oslo.service configuration options building * Add fileutils to openstack-common.conf * disable non-metric meters 5.0.0.0b1 --------- * Remove unnecessary executable permission * Imported Translations from Transifex * Switch to oslo.service * Remove unnecessary wrapping of transformer ExtentionManager * Port test\_complex\_query to Python 3 * Fix expected error message on Python 3 * Fix usage of iterator/list on Python 3 * Replaces ensure\_index for create\_index * pip has its own download cache by default * For sake of future python3 encode FakeMemcache hashes * Make acl\_scenarios tests' keystonemiddleware cache work flexibly * Update version for Liberty * Gnocchi Dispatcher support in Ceilometer 5.0.0a0 ------- * Updated from global requirements * Fix alarm rest notifier logging to include severity * Remove useless execute bit on rst file * Fix unicode/bytes issues in API v2 tests * Fix script name in tox.ini for Elasticsearch * Fix the meter unit types to be consistent * tests: use policy\_file in group oslo\_policy * Fix publisher test\_udp on Python 3 * Fix Ceph object store tests on Python 3 * Port IPMI to Python 3 * Port middleware to Python 3 * [elasticsearch] default trait type to string * Updated from global requirements * Lower down the range for columns which are being used as uuid * Sync with latest oslo-incubator * Fix testing of agent manager with tooz * Remove deprecated Swift middleware * add DNS events * Handle database failures on api startup * Fix more tests on Python 3 * Switch to using pbr's autodoc capability * Remove old oslo.messaging aliases * Remove useless versioninfo and clean ceilometer.conf git exclusion * Register oslo\_log options before using them * Add running functional scripts for defined backend * Remove snapshot.update events as they are not sent * WSME version >=0.7 correctly returns a 405 * TraitText value restricted to max length 255 * Cause gabbi to skip on no storage sooner * Updated from global requirements * Move eventlet using commands into own directory * adjust alarm post ut code to adapt to upstream wsme * Disable rgw pollster when aws module not found * Fixes DiskInfoPollster AttributeError exception * remove useless log message * use oslo.log instead of oslo-incubator code * Port test\_inspector to Python 3 * Fix usage of dictionary methods on Python 3 * Imported Translations from Transifex * Add oslo.vmware to Python 3 test dependencies * Optionally create trust for alarm actions * Remove iso8601 dependency * Enable test\_swift\_middleware on Python 3 * Enable more tests on Python 3 * Skip hbase tests on Python 3 * Clear useless exclude from flake8 ignore in tox * Remove pagination code * Stop importing print\_function * Remove useless release script in tools * Remove useless dependency on posix\_ipc * Remove exceute bit on HTTP dispatcher * Remove oslo.messaging compat from Havana * Fixing event types pattern for Role Noti. 
handler * Mask database.event\_connection details in logs * Switch from MySQL-python to PyMySQL * Python 3: replace long with int * Python 3: Replace unicode with six.text\_type * Python 3: generalize the usage of the six module * Update Python 3 requirements * Python 3: set \_\_bool\_\_() method on Namespace * Python 3: encode to UTF-8 when needed * Python 3: sort tables by their full name * Python 3: replace sys.maxint with sys.maxsize * Initial commit for functional tests * Update a test to properly anticipate HTTP 405 for RestController * proposal to add Chris Dent to Ceilometer core * rebuild event model only for database writes * cleanup problem events logic in event db storage * fix incorrect docstring for dispatcher * Imported Translations from Transifex * api: record severity change in alarm history * VMware: verify vCenter server certificate * Add hardware memory buffer and cache metrics * Make interval optional in pipeline * Improve ceilometer-api install documentation * empty non-string values are returned as string traits * Trait\_\* models have incorrect type for key * small change to development.rst file * Drop use of 'oslo' namespace package * [unittests] Increase agent module unittests coverage * stop mocking os.path in test\_setup\_events\_default\_config * Remove py33 tox target * made change to mod\_wsgi.rst file * ensure collections created on upgrade * Fix raise error when run "tox -egenconfig" * Updated from global requirements * Fix None TypeError in neutron process notifications 2015.1.0 -------- * Have eventlet monkeypatch the time module * Have eventlet monkeypatch the time module * Add the function of deleting alarm history * Updated from global requirements * Fix valueerror when ceilometer-api start * Override gnocchi\_url configuration in test * Move ceilometer/cli.py to ceilometer/cmd/sample.py * Fix valueerror when ceilometer-api start * remove deprecated partitioned alarm service * use message id to generate hbase unique key * gnocchi: fix typo in the aggregation endpoint * Release Import of Translations from Transifex * Fix Copyright date in docs * Replace 'metrics' with 'meters' in option and doc * use message id to generate hbase unique key * update .gitreview for stable/kilo * gnocchi: fix typo in the aggregation endpoint * broadcast data to relevant queues only * Imported Translations from Transifex * fix combination alarm with operator == 'or' * Updated from global requirements 2015.1.0rc1 ----------- * proposal to add ZhiQiang Fan to Ceilometer core * Open Liberty development * Fix a samples xfail test that now succeeds * Cosmetic changes for system architecture docs * Fix a issue for kafka-publisher and refactor the test code * pymongo 3.0 breaks ci gate * use oslo.messaging dispatch filter * Further mock adjustments to deal with intermittent failure * Adds support for default rule in ceilometer policy.json * Updated from global requirements * limit alarm actions * Use oslo\_vmware instead of deprecated oslo.vmware * Remove 'samples:groupby' from the Capabilities list * Use old name of 'hardware.ipmi.node.temperature' * Revert "remove instance: meter" * Tweak authenticate event definition * Add project and domain ID to event definition for identity CRUD * Fix the event type for trusts * reset croniter to avoid cur time shift * Imported Translations from Transifex * Avoid a error when py27 and py-mysql tests run in sequence * Stop using PYTHONHASHSEED=0 in ceilometer tests * remove instance: meter * Added ipv6 support for udp publisher * Remove the 
unnecessary dependency to netaddr * Optimize the flow of getting pollster resources * support ability to skip message signing * Avoid conflict with existing gnocchi\_url conf value * Using oslo.db retry decorator for sample create * alarm: Use new gnocchi aggregation API * collector: enable the service to listen on IPv6 * minimise the use of hmac * Typo in pylintrc * Ceilometer retrieve all images by 'all-tenants' * fix incorrect key check in swift notifications * support disabling profiler and http meters * ensure collections created on upgrade * Fix common misspellings * Updated from global requirements * refuse to post sample which is not supported * Enable collector to requeue samples when enabled * drop deprecated novaclient.v1\_1 * exclude precise metaquery in query field 2015.1.0b3 ---------- * Imported Translations from Transifex * remove log message when process notification * Add gabbi tests for resources * Fix typos and format in docstrings in http dispatcher * add ability to dispatch events to http target * doc: fix class name * add ability to publish to multiple topics * make field and value attributes mandatory in API Query * Fix db2 upgrade in multi-thread run issue * Add memory.resident libvirt meter for Ceilometer * Update reference * Check the namespaces duplication for ceilometer-polling * Add gabbi tests to explore the Meter and MetersControllers * Imported Translations from Transifex * mysql doesn't understand intersect * order traits returned within events * add network, kv-store, and http events * Add support for additional identity events * Add a Kafka publisher as a Ceilometer publisher * Fix response POST /v2/meters/(meter\_name) to 201 status * Attempt to set user\_id for identity events * Switch to oslo.policy 0.3.0 * normalise timestamp in query * Add more power and thermal data * Updated from global requirements * Fix formatting error in licence * Added option to allow sample expiration more frequently * add option to store raw notification * use mongodb distinct * remove event\_types ordering assumption * Add gabbi tests to cover the SamplesController * api: fix alarm creation if time\_constraint is null * fix log message format in event.storage.impl\_sqlalchemy * Remove duplications from docco * Tidy up clean-samples.yaml * Fix a few typos in the docs * use default trait type in event list query * fix wrong string format in libvirt inspector * create a developer section and refactor * Do not default pecan\_debug to CONF.debug * Adding Gabbi Tests to Events API * fix config opts in objectstore.rgw * Updated from global requirements * support time to live on event database for sql backend * add an option to disable non-metric meters * add missing objectstore entry points * Initial gabbi testing for alarms * reorganise architecture page * Add ceph object storage meters * Use oslo\_config choices support * fix inline multiple assignment * alarming: add gnocchi alarm rules * Protect agent startup from import errors in plugins * Revert "Add ceph object storage meters" * api: move alarm rules into they directory * compress events notes * Destroy fixture database after each gabbi TestSuite * Fix unittests for supporting py-pgsql env * Adding links API and CLI query examples * correct column types in events * Be explicit about using /tmp for temporary datafiles * Patch for fixing hardware.memory.used metric * Add ceph object storage meters * [PostgreSQL] Fix regexp operator * Add clean\_exit for py-pgsql unit tests * modify events sql schema to reduce empty columns * 
Remove duplicated resource when pollster polling * check metering\_connection attribute by default * unicode error in event converter * cleanup measurements page * api: add missing combination\_rule field in sample * Fix test case of self-disabled pollster * update event architecture diagram * use configured max\_retries and retry\_interval for database connection * Updated from global requirements * Making utilization the default spelling * Add Disk Meters for ceilometer * correctly leave group when process is stopped * Updated from global requirements * enable oslo namespace check for ceilometer project * Add doc for version list API * Enabling self-disabled pollster * Use werkzeug to run the developement API server * Imported Translations from Transifex * switch to oslo\_serialization * move non-essential libs to test-requirements * Validate default values in config * fix the value of query\_spec.maxSample to advoid to be zero * clean up to use common service code * Add more sql test scenarios * [SQLalchemy] Add regex to complex queries * Fix duplication in sinks names * metering data ttl sql backend breaks resource metadata * Refactor unit test code for disk pollsters * start recording error notifications * Remove no\_resource hack for IPMI pollster * Add local node resource for IPMI pollsters * Use stevedore to load alarm rules api * [MongoDB] Add regex to complex queries * Imported Translations from Transifex * support time to live on event database for MongoDB 2015.1.0b2 ---------- * split api.controllers.v2 * add elasticsearch events db * use debug value for pecan\_debug default * Shuffle agents to send request * Updated from global requirements * Adds disk iops metrics implementation in Hyper-V Inspector * discovery: allow to discover all endpoints * Declarative HTTP testing for the Ceilometer API * add listener to pick up notification from ceilometermiddleware * Drop deprecated namespace for oslo.rootwrap * remove empty module tests.collector * Add disk latency metrics implementation in Hyper-V Inspector * add event listener to collector * add notifier publisher for events * enable event pipeline * Imported Translations from Transifex * deprecate swift middleware * sync oslo and bring in versionutils * Expose alarm severity in Alarm Model * Hyper-V: Adds memory metrics implementation * Remove mox from requirements * Fix IPMI unit test to cover different platforms * adjust import group order in db2 ut code * add event pipeline * remove unexistent module from doc/source/conf.py * Upgrade to hacking 0.10 * Remove the Nova notifier * Remove argparse from requirements * [MongoDB] Improves get\_meter\_statistics method * Fix docs repeating measuring units * [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field * remove pytidylib and netifaces from tox.ini external dependency * Avoid unnecessary API dependency on tooz & ceilometerclient * Correct name of "ipmi" options group * Fix Opencontrail pollster according the API changes * enable tests.storage.test\_impl\_mongodb * Remove lockfile from requirements * Disable eventlet monkey-patching of DNS * Expose vm's metadata to metrics * Adding build folders & sorting gitignore * Disable proxy in unit test case of test\_bin * Add Event and Trait API to document * Refactor ipmi agent manager * Use alarm's evaluation periods in sufficient test * Use oslo\_config instead of deprecated oslo.config * Avoid executing ipmitool in IPMI unit test * Updated from global requirements * Add a direct to database publisher * Fixed MagnetoDB metrics 
title * Imported Translations from Transifex * Fix incorrect test case name in test\_net.py * Updated from global requirements * notification agent missing CONF option * switch to oslo\_i18n * Use right function to create extension list for agent test * Imported Translations from Transifex * Add an exchange for Zaqar in profiler notification plugin * Remove unused pecan configuration options * Updated from global requirements * Use oslo\_utils instead of deprecated oslo.utils * Match the meter names for network services * stop using private timeutils attribute * Update measurement docs for network services * Catch exception when evaluate single alarm * Return a meaningful value or raise an excpetion for libvirt * Imported Translations from Transifex * make transformers optional in pipeline * Added metering for magnetodb * Add release notes URL for Juno * Fix release notes URL for Icehouse * remove unnecessary str method when log messages * Revert "Remove Sphinx from py33 requirements" * untie pipeline manager from samples * reset listeners on agent refresh * Remove inspect\_instances method from virt * Optimize resource list query * Synchronize Python 3 requirements * Remove unnecessary import\_opt|group * Add test data generator via oslo messaging * Check to skip to poll and publish when no resource * Add oslo.concurrency module to tox --env genconfig * add glance events * add cinder events * Manual update from global requirements * Add cmd.polling.CLI\_OPTS to option list * Ignore ceilometer.conf * Switch to oslo.context library 2015.1.0b1 ---------- * Revert "Skip to poll and publish when no resources found" * Added missing measurements and corrected errors in doc * Remove Sphinx from py33 requirements * Clean up bin directory * Improve tools/make\_test\_data.sh correctness * ensure unique pipeline names * implement notification coordination * Make methods static where possible (except openstack.common) * Fix docs to suit merged compute/central agents concept * Drop anyjson * Move central agent code to the polling agent module * RBAC Support for Ceilometer API Implementation * [SQLalchemy] Add groupby ability resource\_metadata * Improve links in config docs * Make LBaaS total\_connections cumulative * remove useless looping in pipeline * Encompassing one source pollsters with common context * Modify tests to support ordering of wsme types * Make compute discovery pollster-based, not agent-level * Add docs about volume/snapshot measurements * Port to graduated library oslo.i18n * Retry to connect database when DB2 or mongodb is restarted * Updated from global requirements * Standardize timestamp fields of ceilometer API * Workflow documentation is now in infra-manual * Add alarm\_name field to alarm notification * Updated from global requirements * Rely on VM UUID to fetch metrics in libvirt * Imported Translations from Transifex * Initializing a longer resource id in DB2 nosql backend * Sync oslo-incubator code to latest * ensure unique list of consumers created * fix import oslo.concurrency issue * Add some rally scenarios * Do not print snmpd password in logs * Miniscule typo in metering\_connection help string * add http dispatcher * [MongoDB] Add groupby ability on resource\_metadata * [MongoDB] Fix bug with 'bad' chars in metadatas keys * Override retry\_interval in MongoAutoReconnectTest * Exclude tools/lintstack.head.py for pep8 check * Add encoding of rows and qualifiers in impl\_hbase * Database.max\_retries only override on sqlalchemy side * Support to capture network 
services notifications * Internal error with period overflow * Remove Python 2.6 classifier * Enable pep8 on ./tools directory * Imported Translations from Transifex * Fixes Hyper-V Inspector disk metrics cache issue * fix swift middleware parsing * Fix order of arguments in assertEqual * Updated from global requirements * Adapting pylint runner to the new message format * Validate AdvEnum & return an InvalidInput on error * add sahara and heat events * add keystone events to definitions * Add timeout to all http requests * [MongoDB] Refactor time to live feature * transform samples only when transformers exist * Updated from global requirements * Remove module not really used by Ceilometer * Switch to oslo.concurrency * Skip to poll and publish when no resources found * Change event type for identity trust notifications * Add mysql and postgresql in tox for debug env * Add new notifications types for volumes/snapshots * Add encoding to keys in compute\_signature * Tests for system and network aggregate pollsters * Add bandwidth to measurements * Fix wrong example of capabilities * Correct the mongodb\_replica\_set option's description * Alarms listing based on "timestamp" * Use 'pg\_ctl' utility to start and stop database * Correct alarm timestamp field in unittest code * Refactor kwapi unit test * Remove duplicated config doc * VMware: Enable VMware inspector to support any port * Clean event method difinition in meter storage base * Fix some nits or typos found by chance * Add Sample ReST API path in webapi document * Enable filter alarms by their type * Fix storage.hbase.util.prepare\_key() for 32-bits system * Add event storage for test\_hbase\_table\_utils * Add per device rate metrics for instances * Fix hacking rule H305 imports not grouped correctly * Add \_\_repr\_\_ method for sample.Sample * remove ordereddict requirement * Improve manual.rst file * Imported Translations from Transifex * Fix columns migrating for PostgreSQL * Updated from global requirements * Updated from global requirements * [MongoDB] Fix bug with reconnection to new master node * Updated from global requirements * support request-id * Update coverage job to references correct file * remove reference to model in migration * Use oslo\_debug\_helper and remove our own version * Allow collector service database connection retry * refresh ceilometer architecture documentation * Edits assert methods * Adds memory stats meter to libvirt inspector * Edits assert methods * Edits assert methods * Edits assert methods * Edits assert method * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * add script to generate test event data * Handle poorly formed individual sensor readings * refactor hbase storage code * Avoid clobbering existing class definition * Hoist duplicated AlarmService initialization to super * Clarify deprecation comment to be accurate * Work toward Python 3.4 support and testing 2014.2 ------ * Fix recording failure for system pollster * sync and clean up oslo * Add missing notification options to the documentation * Add missing alarm options to the documentation * Add oslo.db to config generator * Add missed control exchange options to the documentation * Add coordination related options to the documentation * Add missing collector options to the documentation * switch to oslo-config-generator * Edit docs for docs.opentack.org/developer/ * Add oslo.db to config generator * Fix signature validation failure when using qpid message queue * 
clean capabilities * move db2 and mongo driver to event tree * move sql event driver to event tree * move hbase event driver to event tree * Sets default encoding for PostgreSQL testing * update database dispatcher to use events db * Add role assignment notifications for identity * add mailmap to avoid dup of authors * Add user\_metadata to network samples * Fix recording failure for system pollster 2014.2.rc2 ---------- * Manually updated translations * Updated from global requirements * Creates one database per sql test * Adds pylint check for critical error in new patches * Fix neutron client to catch 404 exceptions * Fix OrderedDict usage for Python 2.6 * Include a 'node' key and value in ipmi metadata * clean path in swift middleware * Implement redesigned separator in names of columns in HBase * [HBase] Add migration script for new row separate design * Imported Translations from Transifex * Include a 'node' key and value in ipmi metadata * Updated from global requirements * Run unit tests against PostgreSQL * create skeleton files for event storage backends * Imported Translations from Transifex * isolate event storage models * Fix neutron client to catch 404 exceptions * Run unit tests against MySQL * Updated from global requirements * Correct JSON-based query examples in documentation * Open Kilo development * Add cfg.CONF.import\_group for service\_credentials * Fix OrderedDict usage for Python 2.6 * clean path in swift middleware 2014.2.rc1 ---------- * Partition static resources defined in pipeline.yaml * Per-source separation of static resources & discovery * dbsync: Acknowledge 'metering\_connection' option * Fix bug in the documentation * Use oslo.msg retry API in rpc publisher * Describe API versions * Change compute agent recurring logs from INFO to DEBUG * Fix bug with wrong bool opt value interpolation * [HBase] Improves speed of unit tests on real HBase backend * Imported Translations from Transifex * Removed unused abc meta class * update references to auth\_token middleware * clean up swift middleware to avoid unicode errors * [HBase] Catch AlreadyExists error in Connection upgrade * Use None instead of mutables in method params default values * Updated from global requirements * Enable to get service types from configuration file * test db2 driver code * Docs: Add description of pipeline discovery section * Typo "possibilites" should be "possibilities" * Modified docs to update DevStack's config filename * Add an API configuration section to docs * Tune up mod\_wsgi settings in example configuration * Allow pecan debug middleware to be turned off * Provide \_\_repr\_\_ for SampleFilter * Eliminate unnecessary search for test cases * Switch to a custom NotImplementedError * minimise ceilometer memory usage * Partition swift pollster resources by tenant * Add IPMI pollster * Add IPMI support * Stop using intersphinx * Use central agent manager's keystone token in discoveries * Handle invalid JSON filters from the input gracefully * Sync jsonutils for namedtuple\_as\_object fix * ceilometer spamming syslog * Timestamp bounds need not be tight (per ceilometer 1288372) * Allow to pass dict from resource discovery * fix network discovery meters * switch to sqlalchemy core * Imported Translations from Transifex * Improve the timestamp validation of ceilometer API * Update docs with Sahara notifications configuration * Migrate the rest of the central agent pollsters to use discoveries * Add documentation for implemented identity meters * Fix tests with testtools>=0.9.39 * 
Document the standard for PaaS service notifications * Returns 401 when unauthorized project access occurs * Adding another set of hardware metrics * normalise resource data 2014.2.b3 --------- * warn against sorting requirements * Add validate alarm\_actions schema in alarm API * Fix help strings * Imported Translations from Transifex * Switch partitioned alarm evaluation to a hash-based approach * Central agent work-load partitioning * collector: Allows to requeue a sample * Typo fixed * Switch to oslo.serialization * Document pipeline publishers configuration * Alarm: Use stevedore to load the service class * Enhance compute diskio tests to handle multi instance * Adding comparison operators in query for event traits * XenAPI support: Update measurements documentation * update requirements * add documentation for setting up api pipeline * Permit usage of notifications for metering * XenAPI support: Disk rates * XenAPI support: Changes for networking metrics * XenAPI support: Memory Usage * XenAPI support: Changes for cpu\_util * XenAPI support: List the instances * Rebase hardware pollsters to use new inspector interface * Switch to use oslo.db * Remove oslo middleware * Adding quotas on alarms * Add an exchange for Trove in profiler notification plugin * Simplify chained comparisons * In-code comments should start with \`#\`, not with \`"""\` * Remove redundant parentheses * skip polls if service is not registered * re-add hashseed to avoid gate error * Switch to oslo.utils * Switch to oslotest * Handle sqlalchemy connection strings with drivers * Rewrite list creation as a list literal * Rewrite dictionary creation as a dictionary literal * Triple double-quoted strings should be used for docstrings * Add upgrading alarm storage in dbsync * Improving of configuration.rst * Fix typos in transformer docstrings * Update tox.ini pep8 config to ignore i18n functions * Added new hardware inspector interface * compute: fix wrong test assertion * sync olso-incubator code * VMware: Support secret host\_password option * refactor filter code in sql backend * Support for per disk volume measurements * Use a FakeRequest object to test middleware * Imported Translations from Transifex * Improve api\_paste\_config file searching * [Hbase] Add column for source filter in \_get\_meter\_samples * Issue one SQL statement per execute() call * Allow tests to run outside tox * [HBase] Refactor hbase.utils * Set page size when Glance API request is called * Adding init into tools folder * Enhancing the make\_test\_data script * correct DB2 installation supported features documentation * Avoid duplication of discovery for multi-sink sources * Improve performance of libvirt inspector requests * Documented Stevedore usage and source details * Add notifications for identity authenticate events * Add message translate module in vmware inspector * Handle Cinder attach and detach notifications * [HBase] Improve uniqueness for row in meter table * Doc enhancement for API service deployment with mod\_wsgi * Update documentation for new transformer * Add the arithmetic transformer endpoint to setup.cfg * Imported Translations from Transifex * Fix unit for vpn connection metric * Debug env for tox * Change spelling mistakes * Use auth\_token from keystonemiddleware * Fix dict and set order related issues in tests * Fix listener for update.start notifications * Sahara integration with Ceilometer * Add notifications for identity CRUD events * Extracting make\_resource\_metadata method * Fix make\_test\_data tools script 
* Add cumulative and gauge to aggregator transformer * Enable some tests against py33 * Remove --tmpdir from mktemp * Replace dict.iteritems() with six.iteritems(dict) * Replace iterator.next() with next(iterator) * Fix aggregator flush method * Automatic discovery of TripleO Overcloud hardware * Set python hash seed to 0 in tox.ini * Don't override the original notification message * Remove ConnectionProxy temporary class * Move sqlalchemy alarms driver code to alarm tree * basestring replaced with six.string\_types * Correct misspelled words 2014.2.b2 --------- * Add retry function for alarm REST notifier * Move hbase alarms driver code to alarm tree * Update measurement docs for FWaaS * Update measurement docs for VPNaaS * Follow up fixes to network services pollsters * Updated from global requirements * Implement consuming ipmi notifications from Ironic * Support for metering FWaaS * Adds Content-Type to alarm REST notifier * Multi meter arithmetic transformer * Remove redudent space in doc string * Use None instead of mutables in test method params defaults * Add support for metering VPNaaS * Use resource discovery for Network Services * Change of get\_events and get\_traits method in MongoDB and Hbase * Fix two out-dated links in doc * Move log alarms driver code to alarm tree * Separate the console scripts * clean up event model * improve expirer performance for sql backend * Move mongodb/db2 alarms driver code to alarm tree * Allow to have different DB for alarm and metering * Replace datetime of time\_constraints by aware object * Sync oslo log module and its dependencies * Use hmac.compare\_digest to compare signature * Add testcase for multiple discovery-driven sources * Fixes aggregator transformer timestamp and user input handling * Improves pipeline transformer documentation * Fix incorrect use of timestamp in test * Add keystone control exchange * Fix call to meter-list in measurements doc * Remove redundant parentheses * [Mongodb] Implement events on Mongodb and DB2 * Fix typos in code comments & docstrings * Make the error message of alarm-not-found clear * Fix SQL exception getting statitics with metaquery * Remove docutils pin * update default\_log\_levels set by ceilometer * Fix annoying typo in partition coordinator test * Transform sample\_cnt type to int * Remove useless sources.json * Fix H405 violations and re-enable gating * Fix H904 violations and re-enable gating * Fix H307 violations and re-enable gating * Fix the section name in CONTRIBUTING.rst * Added osprofiler notifications plugin * Improve a bit performance of Ceilometer * Revert "Align to openstack python package index mirror" * Fix aggregator \_get\_unique\_key method * Remove meter hardware.network.bandwidth.bytes * Fix F402 violations and re-enable gating * Fix E265 violations and re-enable gating * Fix E251 violations and re-enable gating * Fix E128 violations and re-enable gating * Fix E126,H104 violations and re-enable gating * Bump hacking to 0.9.x * Fixed various import issues exposed by unittest * use urlparse from six * clean up sample index * Fix HBase available capabilities list * Updated from global requirements * VMware:Update the ceilometer doc with VMware opts * Handle non-ascii character in meter name * Add log output of "x-openstack-request-id" from nova * Imported Translations from Transifex * fix StringIO errors in unit test * Fix hacking rule 302 and enable it * Imported Translations from Transifex * sync oslo code * Fixes ceilometer-compute service start failure * Reenables the 
testr per test timeout * Avoid reading real config files in unit test * Clean up oslo.middleware.{audit,notifier} * Use hacking from test-requirements * Splits hbase storage code base * Splits mongo storage code base * Separate alarm storage models from other models * Iterates swift response earlier to get the correct status * Fix messaging.get\_transport caching * Fix method mocked in a test * Don't keep a single global TRANSPORT object * Clean up .gitignore * Fix Sphinx directive name in session.py * Fix list of modules not included in auto-gen docs * Downgrade publisher logging to debug level again 2014.2.b1 --------- * remove default=None for config options * [HBase] get\_resource optimization * Fix incorrect trait initialization * Remove unused logging in tests * Revert "Fix the floatingip pollster" * Remove low-value logging from publication codepath * Fix LBaaS connection meter docs * Fix the meter type for LB Bytes * Adding alarm list filtering by state and meter * Adds caches for image and flavor in compute agent * [HBase] Implement events on HBase * Skipping central agent pollster when keystone not available * Respect $TMPDIR environment variable to run tests * Fixed unit test TestRealNotification * Update Measurement Docs for LBaaS * Metering LoadBalancer as a Service * Removes per test testr timeout * Change pipeline\_manager to instance attribute in hooks * Change using of limit argument in get\_sample * Refactor tests to remove direct access to test DBManagers * Fix notification for NotImplemented record\_events * Add missing explicit cfg option import * Fix ceilometer.alarm.notifier.trust import * Use TYPE\_GAUGE rather than TYPE\_CUMULATIVE * Update doc for sample config file issue * Corrects a flaw in the treatment of swift endpoints * use LOG instead of logger as name for the Logger object * Fix doc gate job false success * Improve performance of api requests with hbase scan * Add new 'storage': {'production\_ready': True} capability * Clean tox.ini * Remove (c) and remove unnecessary encoding lines * Fix testing gate due to new keystoneclient release * Ignore the generated file ceilometer.conf.sample * Update the copyright date in doc * Updated from global requirements * reconnect to mongodb on connection failure * refactor sql backend to improve write speed * Don't rely on oslomsg configuration options * replaced unicode() with six.text\_type() * Synced jsonutils from oslo-incubator * Fix the floatingip pollster * Fix project authorization check * Update testrepository configuration * Implemented metering for Cinder's snapshots * Use joins instead of subqueries for metadata filtering * Use None instead of mutables in method params defaults * Remove all mostly untranslated PO files * switch SplitResult to use six * Remove unused db code due to api v1 drop * Updated from global requirements * oslo.messaging context must be a dict * Drop deprecated api v1 * Fix network notifications of neutron bulk creation * mongo: remove \_id in inserted alarm changes * Clean up openstack-common.conf * Revert "oslo.messaging context must be a dict" * Correct class when stopping partitioned alarm eval svc * oslo.messaging context must be a dict * Corrections of spelling, rephrasing for clarity * Adapt failing tests for latest wsme version * Removed StorageEngine class and it's hierarchy * Correcting formatting and adding period in measurement doc * Initialize dispatcher manager in event endpoint * Replaced CONF object with url in storage engine creation * Synced jsonutils from 
oslo-incubator * Remove gettextutils.\_ imports where they are not used * Remove "# noqa" leftovers for gettextutils.\_ * transformer: Add aggregator transformer * Remove conversion debug message * Fix the return of statistic with getting no sample * Remove eventlet.sleep(0) in collector tests * Don't allow queries with 'IN' predicate with an empty sequence * Check if samples returned by get\_sample\_data are not None * Opencontrail network statistics driver * Add a alarm notification using trusts * Replace hard coded WSGI application creation * Describe storage backends in the collector installation guide * Made get\_capabilities a classmethod instead of object method * Disable reverse dns lookup * Consume notif. from multiple message bus * Use NotificationPlugin as an oslo.msg endpoint * Improve combination rule validation * Remove ceilometer.conf.sample * Use known protocol scheme in keystone tests * cleanup virt pollster code * Add encoding argument to deserialising udp packets in collector * Made get\_engine method module-private * Make entities (Resource, User, Project) able to store lists * Remove duplicate alarm from alarm\_ids * More accurate meter name and unit for host load averages * Replace oslo.rpc by oslo.messaging * Fix a response header bug in the error middleware * Remove unnecessary escape character in string format * Optimize checks to set image properties in metadata * fix statistics query in postgres * Removed useless code from \_\_init\_\_ method * Refactored fake connection URL classes * Replace assert statements with assert methods * Removes direct access of timeutils.override\_time * Disable specifying alarm itself in combination rule * Include instance state in metadata * Allowed nested resource metadata in POST'd samples * Sync oslo-incubator code * Updated from global requirements * Refactor the DB implementation of Capabilities API * Fix Jenkins translation jobs * Align to openstack python package index mirror * User a more accurate max\_delay for reconnects * Open Juno development 2014.1.rc1 ---------- * Imported Translations from Transifex * Add note on aggregate duplication to API docco * Use ConectionPool instead of one Connection in HBase * remove dump tables from previous migrations * De-dupe selectable aggregate list in statistics API * ensure dispatcher service is configured before rpc * improve performance of resource-list in sql * SSL errors thrown with Postgres on multi workers * Remove escape character in string format * Verify user/project ID for alarm created by non-admin user * enable a single worker by default * Fix ceilometer.conf.sample mismatch * Metadata in compute.instance.exists fix * Fix order of arguments in assertEquals * Documenting hypervisor support for nova meters * Ensure idempotency of cardinality reduction in mongo * VMware vSphere: Improve the accuracy of queried samples * Use swob instead of webob in swift unit tests * Disable oslo.messaging debug logs * Fix validation error for invalid field name in simple query * fix create\_or\_update logic to avoid rollbacks * Avoid swallowing AssertionError in test skipping logic * Fix hardware pollster to inspect multiple resources * spawn multiple workers in services * Install global lazy \_() * Fixes Hyper-V metrics units * Ensure intended indices on project\_id are created for mongo * Fix the type of the disk IO rate measurements * Change the sample\_type from tuple to string * Fix order of arguments in assertEquals * Ensure alarm rule conform to alarm type * insecure flag added to 
novaclient * Fixes duplicated names in alarm time constraints * Use the list when get information from libvirt * Eventlet monkeypatch must be done before anything * 028 migration script incorrectly skips over section * Fix bug in get\_capabilities behavior in DB drivers * Added documentation for selectable aggregates * Make sure use IPv6 sockets for ceilometer in IPv6 environment * VMware vSphere: Bug fixes * Ensure insecure config option propagated by alarm evaluator * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Rationalize get\_resources for mongodb * Ensure insecure config option propagated by alarm service * add host meters to doc * Add field translation to complex query from OldSample to Sample * Extend test case to cover old alarm style conversion * Updated doc with debug instructions * Refactored the way how testscenarios tests are run * Corrected the sample names in hardware pollsters * Prevent alarm\_id in query field of getting history * Make ceilometer work with sqla 0.9.x * Implements monitoring-network-from-opendaylight * Add user-supplied arguments in log\_handler * VMware vSphere support: Disk rates * Fix updating alarm can specify existing alarm name * Changes for networking metrics support for vSphere * VMware vSphere: Changes for cpu\_util * VMware vSphere support: Memory Usage * Fix broken statistics in sqlalchemy * Fixes Hyper-V Inspector network metrics values * Set storage engine for the trait\_type table * Enable monkeypatch for select module * Rename id to alarm\_id of Alarm in SqlAlchemy * Fix some spelling mistakes and a incorrect url * Skip central agent interval\_task when keystone fails 2014.1.b3 --------- * Ensure user metadata mapped for instance notifications * Per pipeline pluggable resource discovery * Wider selection of aggregates for sqlalchemy * Wider selection of aggregates for mongodb * Adds time constraints to alarms * Remove code duplication Part 3 * Decouple source and sink configuration for pipelines * Selectable aggregate support in mongodb * Selectable aggregation functions for statistics * Add simple capabilities API * Removed global state modification by api test * VMware vSphere support: Performance Mgr APIs * Fix typo * move databases to test requirements * Make recording and scanning data more determined * Implements "not" operator for complex query * Implements metadata query for complex query feature * Alarms support in HBase Part 2 * Alarm support in HBase Part 1 * Remove unused variable * Added hardware pollsters for the central agent * Added hardware agent's inspector and snmp implementation * Updated from global requirements * Pluggable resource discovery for agents * Remove code duplication Part 2 * Imported Translations from Transifex * remove audit logging on flush * Tolerate absent recorded\_at on older mongo/db2 samples * api: export recorded\_at in returned samples * Fix the way how metadata is stored in HBase * Set default log level of iso8601 to WARN * Sync latest config file generator from oslo-incubator * Fix typo on testing doc page * Remove code duplication * sample table contains redundant/duplicate data * rename meter table to sample * storage: store recording timestamp * Fixed spelling error in Ceilometer * Adds doc string to query validate functions in V2 API * Updated from global requirements * Remove code that works around a (now-resolved) bug in pecan * Fix missing source field content on /v2/samples API * Refactor timestamp existence 
validation in V2 API * Use the module units to refer bytes type * sync units.py from oslo to ceilometer * Add comments for \_build\_paginate\_query * Implements monitoring-network * Handle Heat notifications for stack CRUD * Alembic migrations not tested * Modify the discription of combination alarm * check domain state before inspecting nics/disks * Adds gettextutils module in converter * Keep py3.X compatibility for urllib.urlencode * Added missing import * Removed useless prints that pollute tests log * Implements in operator for complex query functionality * Implements field validation for complex query functionality * allow hacking to set dependencies * Implements complex query functionality for alarm history * Implements complex query functionality for alarms * Remove None for dict.get() * Replace assertEqual(None, \*) with assertIsNone in tests * Update notification\_driver * Switch over to oslosphinx * Fix some flaws in ceilometer docstrings * Rename Openstack to OpenStack * Remove start index 0 in range() * Updated from global requirements * Remove blank line in docstring * Use six.moves.urllib.parse instead of urlparse * Propogate cacert and insecure flags to glanceclient * Test case for creating an alarm without auth headers * Refactored run-tests script * Implements complex query functionality for samples * fix column name and alignment * Remove tox locale overrides * Updated from global requirements * Adds flavor\_id in the nova\_notifier * Improve help strings * service: re-enable eventlet just for sockets * Fixes invalid key in Neutron notifications * Replace BoundedInt with WSME's IntegerType * Replace 'Ceilometer' by 'Telemetry' in the generated doc * Doc: Add OldSample to v2.rst * Fixing some simple documentation typos * Updated from global requirements * Fix for a simple typo * Replace 'a alarm' by 'an alarm' * Move ceilometer-send-counter to a console script * sync oslo common code * Handle engine creation inside of Connection object * Adds additional details to alarm notifications * Fix formating of compute-nova measurements table * Fix string-to-boolean casting in queries * nova notifier: disable tests + update sample conf * Update oslo * Refactored session access * Fix the py27 failure because of "ephemeral\_key\_uuid" error * Correct a misuse of RestController in the Event API * Fix docs on what an instance meter represents * Fix measurement docs to correctly represent Existance meters * samples: fix test case status code check * Replace non-ascii symbols in docs * Use swift master * Add table prefix for unit tests with hbase * Add documentation for pipeline configuration * Remove unnecessary code from alarm test * Updated from global requirements * Use stevedore's make\_test\_instance * use common code for migrations * Use explicit http error code for api v2 * Clean .gitignore * Remove unused db engine variable in api * Revert "Ensure we are not exhausting the sqlalchemy pool" * eventlet: stop monkey patching * Update dev docs to include notification-agent * Change meter\_id to meter\_name in generated docs * Correct spelling of logger for dispatcher.file * Fix some typos in architecture doc * Drop foreign key contraints of alarm in sqlalchemy * Re-enable lazy translation * Sync gettextutils from Oslo * Fix wrong doc string for meter type * Fix recursive\_keypairs output * Added abc.ABCMeta metaclass for abstract classes * Removes use of timeutils.set\_time\_override 2014.1.b2 --------- * tests: kill all started processes on exit * Exclude weak datapoints from 
alarm threshold evaluation * Move enable\_acl and debug config to ceilometer.conf * Fix the Alarm documentation of Web API V2 * StringIO compatibility for python3 * Set the SQL Float precision * Convert alarm timestamp to PrecisionTimestamp * use six.move.xrange replace xrange * Exit expirer earlier if db-ttl is disabled * Added resources support in pollster's interface * Improve consistency of help strings * assertTrue(isinstance) replace by assertIsInstance * Return trait type from Event api * Add new rate-based disk and network pipelines * Name and unit mapping for rate\_of\_change transformer * Update oslo * Remove dependencies on pep8, pyflakes and flake8 * Implement the /v2/samples/ API * Fix to handle null threshold\_rule values * Use DEFAULT section for dispatcher in doc * Insertion in HBase should be fixed * Trivial typo * Update ceilometer.conf.sample * Fix use the fact that empty sequences are false * Remove unused imports * Replace mongo aggregation with plain ol' map-reduce * Remove redundant meter (name,type,unit) tuples from Resource model * Fix work of udp publisher * tests: pass /dev/null as config for mongod * requirements: drop netaddr * tests: allow to skip if no database URL * Fix to tackle instances without an image assigned * Check for pep8 E226 and E24 * Fixed spelling mistake * AlarmChange definition added to doc/source/webapi/v2.rst * 1st & last sample timestamps in Resource representation * Avoid false negatives on message signature comparison * cacert is not picked up correctly by alarm services * Change endpoint\_type parameter * Utilizes assertIsNone and assertIsNotNone * Add missing gettextutils import to ceilometer.storage.base * Remove redundant code in nova\_client.Client * Allow customized reseller\_prefix in Ceilometer middleware for Swift * Fix broken i18n support * Empty files should no longer contain copyright * Add Event API * Ensure we are not exhausting the sqlalchemy pool * Add new meters for swift * Sync config generator workaround from oslo * storage: factorize not implemented methods * Don't assume alarms are returned in insert order * Correct env variable in file oslo.config.generator.rc * Handle the metrics sent by nova notifier * Add a wadl target to the documentation * Sync config generator from oslo-incubator * Convert event timestamp to PrecisionTimestamp * Add metadata query validation limitation * Ensure the correct error message is displayed * Imported Translations from Transifex * Move sphinxcontrib-httpdomain to test-requirements * Ensure that the user/project exist on alarm update * api: raise ClientSideError rather than ValueError * Implement the /v2/sample API * service: fix service alive checking * Oslo sync to recover from db2 server disconnects * Event Storage Layer * config: specify a template for mktemp * test code should be excluded from test coverage summary * doc: remove note about Nova plugin framework * doc: fix formatting of alarm action types * Updated from global requirements * Add configuration-driven conversion to Events * add newly added constraints to expire clear\_expired\_metering\_data * fix unit * Add import for publisher\_rpc option * add more test cases to improve the test code coverage #5 * Create a shared queue for QPID topic consumers * Properly reconnect subscribing clients when QPID broker restarts * Don't need session.flush in context managed by session * sql migration error in 020\_add\_metadata\_tables 2014.1.b1 --------- * Remove rpc service from agent manager * Imported Translations from Transifex 
* organise requirements files * Add a Trait Type model and db table * No module named MySQLdb bug * Add a note about permissions to ceilometer logging directory * sync with oslo-incubator * Rename OpenStack Metering to OpenStack Telemetry * update docs to adjust for naming change * Add i18n warpping for all LOG messages * Imported Translations from Transifex * Removed unused method in compute agent manger * connection is not close in migration script * Fixed a bug in sql migration script 020 * Fixed nova notifier test * Added resources definition in the pipeline * Change metadata\_int's value field to type bigint * Avoid intermittent integrity error on alarm creation * Simplify the dispatcher method prototype * Use map\_method from stevedore 0.12 * Remove the collector submodule * Move dispatcher a level up * Split collector * Add a specialized Event Type model and db table * Remove old sqlalchemy-migrate workaround * Revert "Support building wheels (PEP-427)" * full pep8 compliance (part 2) * Selectively import RPC backend retry config * Fixes Hyper-V Inspector disk metrics bug * Imported Translations from Transifex * full pep8 compliance (part1) * Replace mox with mock in alarm,central,image tests * Stop ignoring H506 errors * Update hacking for real * Replace mox with mock in tests.collector * Replace mox with mock in publisher and pipeline * Replace mox with mock in novaclient and compute * Remove useless defined Exception in tests * Support building wheels (PEP-427) * Fixes Hyper-V Inspector cpu metrics bug * Replace mox with mock in tests.storage * Document user-defined metadata for swift samples * Replace mox with mock in energy and objectstore * Updated from global requirements * Replace mox with mock in tests.api.v2 * Refactor API error handling * make record\_metering\_data concurrency safe * Move tests into ceilometer module * Replace mox with mock in tests.api.v1 * Replace mox with mock in tests.api.v2.test\_compute * Corrected import order * Use better predicates from testtools instead of plain assert * Stop using openstack.common.exception * Replace mox with mock in tests.network * Replace mox with mocks in test\_inspector * Fix failing nova\_tests tests * Replace mox with mocks in tests.compute.pollsters * Add an insecure option for Keystone client * Sync log from oslo * Cleanup tests.publisher tests * mongodb, db2: do not print full URL in logs * Use wsme ClientSideError to handle unicode string * Use consistant cache key for swift pollster * Fix the developer documentation of the alarm API * Fix the default rpc policy value * Allow Events without traits to be returned * Replace tests.base part8 * Replace tests.base part7 * Replace tests.base part6 * Imported Translations from Transifex * Imported Translations from Transifex * Sync log\_handler from Oslo * Don't use sqlachemy Metadata as global var * enable sql metadata query * Replace tests.base part5 * Replace tests.base part4 * Imported Translations from Transifex * Updated from global requirements * Fix doc typo in volume meter description * Updated from global requirements * Add source to Resource API object * compute: virt: Fix Instance creation * Fix for get\_resources with postgresql * Updated from global requirements * Add tests when admin set alarm owner to its own * Replace tests.base part3 * Replace tests.base part2 * Replace tests.base part1 * Fix wrong using of Metadata in 15,16 migrations * api: update for WSME 0.5b6 compliance * Changes FakeMemcache to set token to expire on utcnow + 5 mins * Change test 
case get\_alarm\_history\_on\_create * Change alarm\_history.detail to text type * Add support for keystoneclient 0.4.0 * Ceilometer has no such project-list subcommand * Avoid leaking admin-ness into combination alarms * Updated from global requirements * Avoid leaking admin-ness into threshold-oriented alarms * Update Oslo * Set python-six minimum version * Ensure combination alarms can be evaluated * Ensure combination alarm evaluator can be loaded * Apply six for metaclass * add more test cases to improve the test code coverage #6 * Update python-ceilometerclient lower bound to 1.0.6 * Imported Translations from Transifex * add more test cases to improve the test code coverage #4 2013.2.rc1 ---------- * db2 does not allow None as a key for user\_id in user collection * Start Icehouse development * Imported Translations from Transifex * Disable lazy translation * Add notifications for alarm changes * Updated from global requirements * api: allow alarm creation for others project by admins * assertEquals is deprecated, use assertEqual * Imported Translations from Transifex * update alarm service setup in dev doc * Add bug number of some wsme issue * api: remove useless comments * issue an error log when cannot import libvirt * add coverage config file to control module coverage report * tests: fix rounding issue in timestamp comparison * api: return 404 if a alarm is not found * remove locals() for stringformat * add more test cases to improve the test code coverage #3 * Remove extraneous vim configuration comments * Return 401 when action is not authorized * api: return 404 if a resource is not found * keystone client changes in AuthProtocol made our test cases failing * Don't load into alarms evaluators disabled alarms * Remove MANIFEST.in * Allow to get a disabled alarm * Add example with return values in API v2 docs * Avoid imposing alembic 6.0 requirement on all distros * tests: fix places check for timestamp equality * Don't publish samples if resource\_id in missing * Require oslo.config 1.2.0 final * Don't send unuseful rpc alarm notification * service: check that timestamps are almost equals * Test the response body when deleting a alarm * Change resource.resource\_metadata to text type * Adding region name to service credentials * Fail tests early if mongod is not found * add more test cases to improve the test code coverage #2 * add more test cases to improve the test code coverage #1 * Imported Translations from Transifex * Replace OpenStack LLC with OpenStack Foundation * Use built-in print() instead of print statement * Simple alarm partitioning protocol based on AMQP fanout RPC * Handle manually mandatory field * Provide new API endpoint for alarm state * Implement the combination evaluator * Add alarm combination API * Notify with string representation of alarm reason * Convert BoundedInt value from json into int * Fix for timestamp precision in SQLAlchemy * Add source field to Meter model * Refactor threshold evaluator * Alarm API update * Update requirements * WSME 0.5b5 breaking unit tests * Fix failed downgrade in migrations * refactor db2 get\_meter\_statistics method to support mongodb and db2 * tests: import pipeline config * Fix a tiny mistake in api doc * collector-udp: use dispatcher rather than storage * Imported Translations from Transifex * Drop sitepackages=False from tox.ini * Update sphinxcontrib-pecanwsme to 0.3 * Architecture enhancements * Force MySQL to use InnoDB/utf8 * Update alembic requirement to 0.6.0 version * Correctly output the sample content 
in the file publisher * Pecan assuming meter names are extensions * Handle inst not found exceptions in pollsters * Catch exceptions from nova client in poll\_and\_publish * doc: fix storage backend features status * Add timestamp filtering cases in storage tests * Imported Translations from Transifex * Use global openstack requirements * Add group by statistics examples in API v2 docs * Add docstrings to some methods * add tests for \_query\_to\_kwargs func * validate counter\_type when posting samples * Include auth\_token middleware in sample config * Update config generator * run-tests: fix MongoDB start wait * Imported Translations from Transifex * Fix handling of bad paths in Swift middleware * Drop the \*.create.start notification for Neutron * Make the Swift-related doc more explicit * Fix to return latest resource metadata * Update the high level architecture * Alarm history storage implementation for sqlalchemy * Improve libvirt vnic parsing with missing mac! * Handle missing libvirt vnic targets! * Make type guessing for query args more robust * add MAINTAINERS file * nova\_notifier: fix tests * Update openstack.common.policy from oslo-incubator * Clean-ups related to alarm history patches * Improved MongoClient pooling to avoid out of connections error * Disable the pymongo pooling feature for tests * Fix wrong migrations * Fixed nova notifier unit test * Add group by statistics in API v2 * Update to tox 1.6 and setup.py develop * Add query support to alarm history API * Reject duplicate events * Fixes a bug in Kwapi pollster * alarm api: rename counter\_name to meter\_name * Fixes service startup issue on Windows * Handle volume.resize.\* notifications * Network: process metering reports from Neutron * Alarm history storage implementation for mongodb * Fix migration with fkeys * Fixes two typos in this measurements.rst * Add a fake UUID to Meter on API level * Append /usr/sbin:/sbin to the path for searching mongodb * Plug alarm history logic into the API * Added upper version boundry for six * db2 distinct call results are different from mongodb call * Sync rpc from oslo-incubator * Imported Translations from Transifex * Add pagination parameter to the database backends of storage * Base Alarm history persistence model * Fix empty metadata issue of instance * alarm: generate alarm\_id in API * Import middleware from Oslo * Imported Translations from Transifex * Adds group by statistics for MongoDB driver * Fix wrong UniqueConstraint name * Adds else and TODO in statistics storage tests * Imported Translations from Transifex * Extra indexes cleanup * API FunctionalTest class lacks doc strings * install manual last few sections format needs to be fixed * api: update v1 for Flask >= 0.10 * Use system locale when Accept-Language header is not provided * Adds Hyper-V compute inspector * missing resource in middleware notification * Support for wildcard in pipeline * Refactored storage tests to use testscenarios * doc: replace GitHub by git.openstack.org * api: allow usage of resource\_metadata in query * Remove useless doc/requirements * Fixes non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test\_impl\_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is\_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute 
did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test\_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from\_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when \_ is imported * Remove use\_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message\_id is not allowed to be submitted via api * Api V2 post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext \_ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test\_post\_alarm case in test\_alarm\_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing \_ * Fix nova test\_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm\_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource\_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message\_id in POSTed samples * rpc: remove source argument from message conversion * Remove source as a publisher argument * Add repeat\_actions to alarm * Rename get\_counters to get\_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova\_client.py * objectstore: trivial cleanup in \_Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to 
ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl\_mongodb to use full connection url * calling distinct on \_id field against a collection is slow * Use configured endpoint\_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify\_on\_state\_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage\_conn in CollectorService * Remove replace/preserve logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table\_prefix as a global option * mongodb: do not set replica\_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching\_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu\_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource\_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS\_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu\_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get\_counter\_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter 
topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get\_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova\_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic\_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-\* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled\_notification\_listeners option * Remove disabled\_compute\_pollsters option * Remove disabled\_central\_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance\_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import\_opt rather than import * Move os\_\* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database\_connection * Add support for limiting the 
number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex 2013.2.b1 --------- * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os\*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract\_opts group extraction * Fix the sample name of 'resource\_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive\_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone\_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter\_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm\_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter\_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table\_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * 
Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require\_map\_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get\_event\_interval * Remove gettext.install from ceilometer/\_\_init\_\_.py * Prepare for future i18n use of \_() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get\_volume\_sum and get\_volume\_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl\_test * api: run max\_resource\_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME\_DELETE notification * Open havana development, bump to 2013.2 2013.1 ------ * Change the column counter\_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max\_volume tests on SQL backend too * api: run list\_sources tests on SQL and Mongo backend * api: run list\_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.\*.bytes" counter * Updated the description of get\_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default\_log\_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod\_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list\_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old 
configuration metering\_storage\_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join\_consumer\_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta\_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os\_\* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter\_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools\_git from setup\_requires * Removed unused param for get\_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute\_manager unavailable * Rename run\_tests.sh to wrap\_nosetests.sh * Update openstack.common * Corrected get\_raw\_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth\_token middleware changes * policy: fix policy\_file finding * Remove the \_initialize\_config\_options * Add pyflakes * Make the v2 API date 
query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth\_keystone * Import only once in nova\_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance\_type information to NetPollster * Fix dbsync API change * Fix image\_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show\_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute\_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth\_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql\_engine option type * Remove nova.flags usage * api: add support for timestamp in \_list\_resources() * api: add timestamp interval support in \_list\_events() * tests: simplify api list\_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num\_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse\_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: 
remove get\_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu\_util meter recording CPU utilization % * Fix TypeError from old-style publish\_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get\_volume\_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record\_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 0.1 --- * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make\_test\_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.\*.\* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control\_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from 
the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get\_metadata\_from\_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova\_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record\_metering\_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}\_disk\_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. 
Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event\_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit ceilometer-10.0.0/etc/0000775000175100017510000000000013236733440014524 5ustar zuulzuul00000000000000ceilometer-10.0.0/etc/ceilometer/0000775000175100017510000000000013236733440016654 5ustar zuulzuul00000000000000ceilometer-10.0.0/etc/ceilometer/examples/0000775000175100017510000000000013236733440020472 5ustar zuulzuul00000000000000ceilometer-10.0.0/etc/ceilometer/examples/osprofiler_event_definitions.yaml0000666000175100017510000000130213236733243027335 0ustar zuulzuul00000000000000--- - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: payload.info.db.params ceilometer-10.0.0/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml0000666000175100017510000002137513236733243030176 0ustar zuulzuul00000000000000metric: # LBaaS V2 - name: "loadbalancer.create" event_type: - "loadbalancer.create.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name 
description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.update" event_type: - "loadbalancer.update.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "loadbalancer.delete" event_type: - "loadbalancer.delete.end" type: "delta" unit: "loadbalancer" volume: 1 resource_id: $.payload.loadbalancer.id project_id: $.payload.loadbalancer.tenant_id metadata: name: $.payload.loadbalancer.name description: $.payload.loadbalancer.description listeners: $.payload.loadbalancer.listeners operating_status: $.payload.loadbalancer.operating_status vip_address: $.payload.loadbalancer.vip_address vip_subnet_id: $.payload.loadbalancer.vip_subnet_id admin_state_up: $.payload.loadbalancer.admin_state_up - name: "listener.create" event_type: - "listener.create.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "listener.update" event_type: - "listener.update.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "listener.delete" event_type: - "listener.delete.end" type: "delta" unit: "listener" volume: 1 resource_id: $.payload.listener.id project_id: $.payload.listener.tenant_id metadata: name: $.payload.listener.name description: $.payload.listener.description admin_state_up: $.payload.listener.admin_state_up loadbalancers: $.payload.listener.loadbalancers default_pool_id: $.payload.listener.default_pool_id protocol: $.payload.listener.protocol connection_limit: $.payload.listener.connection_limit - name: "healthmonitor.create" event_type: - "healthmonitor.create.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.update" event_type: - "healthmonitor.update.end" type: "delta" 
unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "healthmonitor.delete" event_type: - "healthmonitor.delete.end" type: "delta" unit: "healthmonitor" volume: 1 resource_id: $.payload.healthmonitor.id project_id: $.payload.healthmonitor.tenant_id metadata: name: $.payload.healthmonitor.name description: $.payload.healthmonitor.description admin_state_up: $.payload.healthmonitor.admin_state_up max_retries: $.payload.healthmonitor.max_retries delay: $.payload.healthmonitor.delay timeout: $.payload.healthmonitor.timeout pools: $.payload.healthmonitor.pools type: $.payload.healthmonitor.type - name: "pool.create" event_type: - "pool.create.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.update" event_type: - "pool.update.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "pool.delete" event_type: - "pool.delete.end" type: "delta" unit: "pool" volume: 1 resource_id: $.payload.pool.id project_id: $.payload.pool.tenant_id metadata: name: $.payload.pool.name description: $.payload.pool.description admin_state_up: $.payload.pool.admin_state_up lb_method: $.payload.pool.lb_method protocol: $.payload.pool.protocol subnet_id: $.payload.pool.subnet_id vip_id: $.payload.pool.vip_id status: $.payload.pool.status status_description: $.payload.pool.status_description - name: "member.create" event_type: - "member.create.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.update" event_type: - "member.update.end" type: "delta" unit: "member" volume: 1 resource_id: $.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id - name: "member.delete" event_type: - "member.delete.end" type: "delta" unit: "member" volume: 1 resource_id: 
$.payload.member.id project_id: $.payload.member.tenant_id metadata: address: $.payload.member.address status: $.payload.member.status status_description: $.payload.member.status_description weight: $.payload.member.weight admin_state_up: $.payload.member.admin_state_up protocol_port: $.payload.member.protocol_port pool_id: $.payload.member.pool_id

ceilometer-10.0.0/etc/ceilometer/polling.yaml
---
sources:
    - name: some_pollsters
      interval: 300
      meters:
        - cpu
        - cpu_l3_cache
        - memory.usage
        - network.incoming.bytes
        - network.incoming.packets
        - network.outgoing.bytes
        - network.outgoing.packets
        - disk.device.read.bytes
        - disk.device.read.requests
        - disk.device.write.bytes
        - disk.device.write.requests
        - hardware.cpu.util
        - hardware.memory.used
        - hardware.memory.total
        - hardware.memory.buffer
        - hardware.memory.cached
        - hardware.memory.swap.avail
        - hardware.memory.swap.total
        - hardware.system_stats.io.outgoing.blocks
        - hardware.system_stats.io.incoming.blocks
        - hardware.network.ip.incoming.datagrams
        - hardware.network.ip.outgoing.datagrams

ceilometer-10.0.0/etc/ceilometer/polling_all.yaml
---
sources:
    - name: all_pollsters
      interval: 300
      meters:
        - "*"

ceilometer-10.0.0/etc/ceilometer/rootwrap.d/
ceilometer-10.0.0/etc/ceilometer/rootwrap.d/ipmi.filters
# ceilometer-rootwrap command filters for IPMI capable nodes
# This file should be owned by (and only-writeable by) the root user

[Filters]
# ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool'
ipmitool: CommandFilter, ipmitool, root

ceilometer-10.0.0/etc/ceilometer/rootwrap.conf
# Configuration for ceilometer-rootwrap
# This file should be owned by (and only-writeable by) the root user

[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap

# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin

# Enable logging to syslog
# Default value is False
use_syslog=False

# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, user0, user1...
# Default value is 'syslog'
syslog_log_facility=syslog

# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
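The two files above work together: rootwrap.conf points ceilometer-rootwrap at the filter directories (filters_path) and trusted executable directories (exec_dirs), while each file under rootwrap.d lists the privileged commands that may actually be run. Purely as an illustration of the filter syntax — the tool name below is hypothetical and is not part of the shipped ipmi.filters — an extra entry in a rootwrap.d filter file would follow the same "name: CommandFilter, executable, user" pattern:

# hypothetical example only, not shipped with Ceilometer
ipmi-sensors: CommandFilter, ipmi-sensors, root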
ceilometer-10.0.0/etc/ceilometer/ceilometer-config-generator.conf
[DEFAULT]
output_file = etc/ceilometer/ceilometer.conf
wrap_width = 79
namespace = ceilometer
namespace = ceilometer-auth
namespace = oslo.concurrency
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.service.service

ceilometer-10.0.0/README.rst
==========
Ceilometer
==========

--------
Overview
--------

Ceilometer is a data collection service that collects event and metering
data by monitoring notifications sent from OpenStack services. It publishes
collected data to various targets including data stores and message queues.

Ceilometer is distributed under the terms of the Apache License, Version 2.0.
The full terms and conditions of this license are detailed in the LICENSE
file.

-------------
Documentation
-------------

Release notes are available at
https://releases.openstack.org/teams/telemetry.html

Developer documentation is available at
https://docs.openstack.org/ceilometer/latest/

Launchpad Projects
------------------
- Server: https://launchpad.net/ceilometer

Code Repository
---------------
- Server: https://github.com/openstack/ceilometer

Bug Tracking
------------
- Bugs: https://bugs.launchpad.net/ceilometer

IRC
---
IRC Channel: #openstack-telemetry on `Freenode`_.

Mailing list
------------
The project uses
http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev as its
mailing list. Please use the tag ``[Ceilometer]`` in the subject for new
threads.

.. _Freenode: https://freenode.net/

ceilometer-10.0.0/test-requirements.txt
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
coverage>=3.6 # Apache-2.0
fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
os-win>=0.2.3 # Apache-2.0
# Docs Requirements
openstackdocstheme>=1.11.0 # Apache-2.0
reno>=1.6.2 # Apache2
oslotest>=2.15.0 # Apache-2.0
oslo.vmware>=1.16.0 # Apache-2.0
pyOpenSSL>=0.14 # Apache-2.0
sphinx>=1.6.2 # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
gabbi>=1.30.0 # Apache-2.0
requests-aws>=0.1.4 # BSD License (3 clause)
os-testr>=0.4.1 # Apache-2.0
kafka-python>=1.3.2 # Apache-2.0

ceilometer-10.0.0/setup.py
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT

import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) ceilometer-10.0.0/MAINTAINERS0000666000175100017510000000065513236733243015457 0ustar zuulzuul00000000000000= Generalist Code Reviewers = The current members of ceilometer-core are listed here: https://launchpad.net/~ceilometer-drivers/+members#active This group can +2 and approve patches in Ceilometer. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. = IRC handles of maintainers = gordc jd__ lhx liusheng llu pradk sileht ceilometer-10.0.0/CONTRIBUTING.rst0000666000175100017510000000106513236733243016417 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/ceilometer ceilometer-10.0.0/LICENSE0000666000175100017510000002363713236733243014774 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ceilometer-10.0.0/.mailmap0000666000175100017510000000370613236733243015403 0ustar zuulzuul00000000000000# Format is: # # Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan ceilometer-10.0.0/playbooks/0000775000175100017510000000000013236733440015754 5ustar zuulzuul00000000000000ceilometer-10.0.0/playbooks/legacy/0000775000175100017510000000000013236733440017220 5ustar zuulzuul00000000000000ceilometer-10.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/0000775000175100017510000000000013236733440026650 5ustar zuulzuul00000000000000ceilometer-10.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml0000666000175100017510000000455113236733243030531 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ 
ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ceilometer-10.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml0000666000175100017510000000512213236733243030343 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-telemetry-dsvm-integration-ceilometer from old job gate-telemetry-dsvm-integration-ceilometer-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_HEAT=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_EXERCISES=0 export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export PROJECTS="openstack/ceilometer openstack/aodh openstack/panko openstack/telemetry-tempest-plugin" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh git://git.openstack.org/openstack/aodh" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin telemetry-tempest-plugin git://git.openstack.org/openstack/telemetry-tempest-plugin" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=15" export DEVSTACK_PROJECT_FROM_GIT=$ZUUL_SHORT_PROJECT_NAME function post_test_hook { cd /opt/stack/new/telemetry-tempest-plugin/telemetry_tempest_plugin/integration/hooks/ ./post_test_hook.sh } export -f post_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ceilometer-10.0.0/playbooks/legacy/grenade-dsvm-ceilometer/0000775000175100017510000000000013236733440023722 5ustar zuulzuul00000000000000ceilometer-10.0.0/playbooks/legacy/grenade-dsvm-ceilometer/post.yaml0000666000175100017510000000063313236733243025600 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ 
zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ceilometer-10.0.0/playbooks/legacy/grenade-dsvm-ceilometer/run.yaml0000666000175100017510000000315413236733243025420 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-grenade-dsvm-ceilometer from old job gate-grenade-dsvm-ceilometer-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PROJECTS="openstack-dev/grenade $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_GRENADE=pullup export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export GRENADE_PLUGINRC="enable_grenade_plugin ceilometer https://git.openstack.org/openstack/ceilometer" export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=none" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ceilometer-10.0.0/run-tests.sh0000777000175100017510000000035413236733243016261 0ustar zuulzuul00000000000000#!/bin/bash set -e set -x echo echo "OS_TEST_PATH: $OS_TEST_PATH" echo "CEILOMETER_TEST_DEBUG: $CEILOMETER_TEST_DEBUG" echo if [ "$CEILOMETER_TEST_DEBUG" == "True" ]; then oslo_debug_helper $* else ./tools/pretty_tox.sh $* fi ceilometer-10.0.0/.testr.conf0000666000175100017510000000074313236733243016046 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./ceilometer/tests} -t . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list # NOTE(chdent): Only used/matches on gabbi-related tests. group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_(?:prefix_|)[^_]+)_ ceilometer-10.0.0/bindep.txt0000666000175100017510000000027013236733243015755 0ustar zuulzuul00000000000000libxml2-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] build-essential [platform:dpkg] libffi-dev [platform:dpkg] gettext [platform:dpkg]