ceilometer-6.0.0/ 0000775 0005670 0005671 00000000000 12701406364 015011 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/devstack/ 0000775 0005670 0005671 00000000000 12701406364 016615 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/devstack/upgrade/ 0000775 0005670 0005671 00000000000 12701406364 020244 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/devstack/upgrade/shutdown.sh 0000775 0005670 0005671 00000001211 12701406223 022443 0 ustar jenkins jenkins 0000000 0000000 #!/bin/bash
#
# Grenade shutdown hook: stop every Ceilometer service on the BASE
# (pre-upgrade) devstack and verify that nothing is left running.
#
set -o errexit

# Pull in grenade settings/helpers, then the base DevStack helpers that
# the ceilometer plugin functions below depend on.
source $GRENADE_DIR/grenaderc
source $GRENADE_DIR/functions
source $BASE_DEVSTACK_DIR/functions
source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
source $BASE_DEVSTACK_DIR/lib/tls
source $BASE_DEVSTACK_DIR/lib/apache

# Locate the ceilometer plugin and get its functions
# (this script lives in devstack/upgrade/, so two dirnames up is devstack/)
CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0))
source $CEILOMETER_DEVSTACK_DIR/plugin.sh

set -o xtrace

stop_ceilometer

# ensure everything is stopped; these are screen/process names, one per
# ceilometer service that the base devstack may have started
SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api"
ensure_services_stopped $SERVICES_DOWN
ceilometer-6.0.0/devstack/upgrade/settings 0000664 0005670 0005671 00000001054 12701406223 022021 0 ustar jenkins jenkins 0000000 0000000 register_project_for_upgrade ceilometer
# Enable the ceilometer plugin and its full service set (plus tempest)
# on the BASE (pre-upgrade) side of the grenade run.
devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest
# Mirror the same plugin/services on the TARGET (post-upgrade) side so
# the upgraded deployment runs an identical topology.
devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest
ceilometer-6.0.0/devstack/upgrade/upgrade.sh 0000775 0005670 0005671 00000005735 12701406224 022237 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env bash
# ``upgrade-ceilometer``
#
# Grenade upgrade hook: snapshot the base data store, install the target
# ceilometer, migrate the database, restart services and verify them.

echo "*********************************************************************"
echo "Begin $0"
echo "*********************************************************************"

# Clean up any resources that may be in use
cleanup() {
    set +o errexit

    echo "*********************************************************************"
    echo "ERROR: Abort $0"
    echo "*********************************************************************"

    # Kill ourselves to signal any calling process
    trap 2; kill -2 $$
}

trap cleanup SIGHUP SIGINT SIGTERM

# Keep track of the grenade directory
RUN_DIR=$(cd $(dirname "$0") && pwd)

# Source params
source $GRENADE_DIR/grenaderc

# Import common functions
source $GRENADE_DIR/functions

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Save mongodb state (replace with snapshot) before touching anything.
# Only applies when ceilometer.conf points its connection at mongo.
# TODO(chdent): There used to be a 'register_db_to_save ceilometer'
# which may wish to consider putting back in.
if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
    mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$BASE_RELEASE
fi

# Upgrade Ceilometer
# ==================

# Locate ceilometer devstack plugin, the directory above the
# grenade plugin.
CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0))

# Get functions from current DevStack
source $TARGET_DEVSTACK_DIR/functions
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/apache

# Get ceilometer functions from devstack plugin
source $CEILOMETER_DEVSTACK_DIR/settings

# Print the commands being run so that we can see the command that triggers
# an error.
set -o xtrace

# Install the target ceilometer
source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install

# calls upgrade-ceilometer for specific release
upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH

# Migrate the database
# NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but
# currently it is not.
CEILOMETER_BIN_DIR=$(dirname $(which ceilometer-dbsync))
$CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error"

# Start Ceilometer
start_ceilometer

# Note these are process names, not service names, hence the full
# polling command lines for the namespaced agents.
ensure_services_started "ceilometer-polling --polling-namespaces compute" \
                        "ceilometer-polling --polling-namespaces central" \
                        "ceilometer-polling --polling-namespaces ipmi" \
                        ceilometer-agent-notification \
                        ceilometer-api \
                        ceilometer-collector

# Save mongodb state again after the upgrade (replace with snapshot)
if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
    mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$TARGET_RELEASE
fi

set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
echo "*********************************************************************"
ceilometer-6.0.0/devstack/settings 0000664 0005670 0005671 00000004274 12701406224 020402 0 ustar jenkins jenkins 0000000 0000000 # turn on all the ceilometer services by default
# Pollsters
enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi
# Notification Agent
enable_service ceilometer-anotification
# Data Collector
enable_service ceilometer-collector
# API service
enable_service ceilometer-api

# Default directories
CEILOMETER_DIR=$DEST/ceilometer
CEILOMETER_CONF_DIR=/etc/ceilometer
CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}

# Set up database backend (mysql, postgresql, mongodb, es, gnocchi —
# see _ceilometer_configure_storage_backend in plugin.sh)
CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}

# Gnocchi default archive_policy for Ceilometer
GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low}

# Ceilometer connection info.
CEILOMETER_SERVICE_PROTOCOL=http
CEILOMETER_SERVICE_HOST=$SERVICE_HOST
CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}

# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}

# Group-membership (tooz) coordination backend; empty disables
# workload partitioning.
CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379}
CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}

# Cache Options
# NOTE(cdent): These are incomplete and specific for this testing.
CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis}
CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379}

CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False}

# Tell Tempest this project is present
TEMPEST_SERVICES+=,ceilometer

# Set up default directories for client and middleware
GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master}
GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient
GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware

# Get rid of this before done.
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End:
ceilometer-6.0.0/devstack/apache-ceilometer.template 0000664 0005670 0005671 00000000762 12701406223 023720 0 ustar jenkins jenkins 0000000 0000000 Listen %PORT%
<VirtualHost *:%PORT%>
    WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
    WSGIProcessGroup ceilometer-api
    WSGIScriptAlias / %WSGIAPP%
    WSGIApplicationGroup %{GLOBAL}
    <IfVersion >= 2.4>
        ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/%APACHE_NAME%/ceilometer.log
    CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined
</VirtualHost>

WSGISocketPrefix /var/run/%APACHE_NAME%
ceilometer-6.0.0/devstack/plugin.sh 0000664 0005670 0005671 00000046765 12701406224 020464 0 ustar jenkins jenkins 0000000 0000000 # Install and start **Ceilometer** service in devstack
#
# To enable Ceilometer in devstack add an entry to local.conf that
# looks like
#
# [[local|localrc]]
# enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
#
# By default all ceilometer services are started (see
# devstack/settings). To disable a specific service use the
# disable_service function.
#
# NOTE: Currently, there are two ways to get the IPMI based meters in
# OpenStack. One way is to configure Ironic conductor to report those meters
# for the nodes managed by Ironic and to have Ceilometer notification
# agent to collect them. Ironic by default does NOT enable that reporting
# functionality. So in order to do so, users need to set the option of
# conductor.send_sensor_data to true in the ironic.conf configuration file
# for the Ironic conductor service, and also enable the
# ceilometer-anotification service. If you do this disable the IPMI
# polling agent:
#
# disable_service ceilometer-aipmi
#
# The other way is to use Ceilometer ipmi agent only to get the IPMI based
# meters. To avoid duplicated meters, users need to make sure to set the
# option of conductor.send_sensor_data to false in the ironic.conf
# configuration file if the node on which Ceilometer ipmi agent is running
# is also managed by Ironic.
#
# Several variables set in the localrc section adjust common behaviors
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600.
# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es')
# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
# CEILOMETER_EVENTS: Set to True to enable event collection
# CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming
# Save trace setting so it can be restored at the end of the plugin
XTRACE=$(set +o | grep xtrace)
set -o xtrace

# TODO(liusheng) Temporarily add this to avoid integration test failure, see bug1548634
export SERVICE_TENANT_NAME=$SERVICE_PROJECT_NAME

# Support potential entry-points console scripts in VENV or not
if [[ ${USE_VENV} = True ]]; then
    PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv
    CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin
else
    CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
fi

# Test if any Ceilometer services are enabled
# is_ceilometer_enabled
function is_ceilometer_enabled {
    # A leading comma is prepended so the first entry in the list
    # matches the same ",ceilometer-" pattern as every other entry.
    if [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]]; then
        return 0
    fi
    return 1
}
# ceilometer_service_url - print the API endpoint assembled from the
# configured protocol, host and port.
function ceilometer_service_url {
    printf '%s://%s:%s\n' "$CEILOMETER_SERVICE_PROTOCOL" "$CEILOMETER_SERVICE_HOST" "$CEILOMETER_SERVICE_PORT"
}
# _ceilometer_install_mongodb - Install mongodb server (and, on Fedora,
# the separately packaged client), then (re)start the service.
function _ceilometer_install_mongodb {
    # Server package is the same on all
    local packages=mongodb-server

    if is_fedora; then
        # mongodb client
        packages="${packages} mongodb"
    fi

    install_package ${packages}

    # NOTE(review): the systemd unit is named 'mongod' on Fedora but
    # 'mongodb' elsewhere, hence the distro split.
    if is_fedora; then
        restart_service mongod
    else
        restart_service mongodb
    fi

    # give time for service to restart
    sleep 5
}
# _ceilometer_install_redis() - Install and (re)start the redis server,
# then install the matching python client library.
function _ceilometer_install_redis {
    # Ubuntu names the server package redis-server; other distros use
    # plain redis. This will fail (correctly) where a redis package is
    # unavailable.
    local redis_pkg=redis
    if is_ubuntu; then
        redis_pkg=redis-server
    fi
    install_package $redis_pkg
    restart_service $redis_pkg

    pip_install_gr redis
}
# Configure mod_wsgi: install the wsgi application script and render the
# vhost template (apache-ceilometer.template) into the apache site config
# by substituting the %PLACEHOLDER% tokens.
function _ceilometer_config_apache_wsgi {
    sudo mkdir -p $CEILOMETER_WSGI_DIR

    local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
    # NOTE(review): apache_version is assigned but never used below —
    # candidate for removal.
    local apache_version=$(get_apache_version)
    local venv_path=""

    # Copy proxy vhost and wsgi file
    sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app

    if [[ ${USE_VENV} = True ]]; then
        venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages"
    fi

    sudo cp $CEILOMETER_DIR/devstack/apache-ceilometer.template $ceilometer_apache_conf
    sudo sed -e "
        s|%PORT%|$CEILOMETER_SERVICE_PORT|g;
        s|%APACHE_NAME%|$APACHE_NAME|g;
        s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g;
        s|%USER%|$STACK_USER|g;
        s|%VIRTUALENV%|$venv_path|g
    " -i $ceilometer_apache_conf
}
# Install the backing service required by the configured tooz
# coordination backend (and/or the redis cache backend). The backend is
# selected by the URL scheme of CEILOMETER_COORDINATION_URL.
function _ceilometer_prepare_coordination {
    if [[ $CEILOMETER_COORDINATION_URL == memcached:* ]]; then
        install_package memcached
    elif [[ "${CEILOMETER_COORDINATION_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then
        # NOTE: this previously tested the misspelled variable
        # CEILOMETER_COORDINATOR_URL, which is never set (settings
        # defines CEILOMETER_COORDINATION_URL), so a redis coordination
        # backend was never installed unless the cache backend was also
        # redis.
        _ceilometer_install_redis
    fi
}
# Install required services for storage backends. Only mongodb and
# elasticsearch need anything beyond what devstack already provides;
# sql backends are handled by devstack itself.
function _ceilometer_prepare_storage_backend {
    case "$CEILOMETER_BACKEND" in
        mongodb)
            pip_install_gr pymongo
            _ceilometer_install_mongodb
            ;;
        es)
            ${TOP_DIR}/pkg/elasticsearch.sh download
            ${TOP_DIR}/pkg/elasticsearch.sh install
            ;;
    esac
}
# Install the python modules for inspecting nova virt instances.
function _ceilometer_prepare_virt_drivers {
    # Only install virt drivers if we're running nova compute
    is_service_enabled n-cpu || return 0

    case "$VIRT_DRIVER" in
        libvirt)
            pip_install_gr libvirt-python
            ;;
        vsphere)
            pip_install_gr oslo.vmware
            ;;
    esac
}
# Create ceilometer related accounts in Keystone: the service user, the
# "metering" service entry and its public/internal/admin endpoints.
# Only done when the API service is enabled.
function _ceilometer_create_accounts {
    if is_service_enabled ceilometer-api; then
        create_service_user "ceilometer" "admin"

        get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service"
        # All three endpoint interfaces point at the same URL.
        get_or_create_endpoint "metering" \
            "$REGION_NAME" \
            "$(ceilometer_service_url)" \
            "$(ceilometer_service_url)" \
            "$(ceilometer_service_url)"

        if is_service_enabled swift; then
            # Ceilometer needs ResellerAdmin role to access Swift account stats.
            get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME
        fi
    fi
}
# preinstall_ceilometer() - hook run before ceilometer is installed;
# nothing needs to happen outside the virtualenv context, so it only
# reports that it is skipping.
function preinstall_ceilometer {
    local msg="Preinstall not in virtualenv context. Skipping."
    echo_summary "$msg"
}
# Remove WSGI files, disable and remove Apache vhost file.
# Only meaningful when the API runs under mod_wsgi.
function _ceilometer_cleanup_apache_wsgi {
    if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
        sudo rm -f "$CEILOMETER_WSGI_DIR"/*
        sudo rmdir "$CEILOMETER_WSGI_DIR"
        sudo rm -f $(apache_site_config_for ceilometer)
    fi
}
# _drop_database() - wipe stored samples/events for the backends whose
# data is not removed by recreate_database (mongodb, elasticsearch).
function _drop_database {
    is_service_enabled ceilometer-collector ceilometer-api || return 0

    case "$CEILOMETER_BACKEND" in
        mongodb)
            mongo ceilometer --eval "db.dropDatabase();"
            ;;
        es)
            curl -XDELETE "localhost:9200/events_*"
            ;;
    esac
}
# cleanup_ceilometer() - Remove residual data files, anything left over
# from previous runs that a clean run would need to clean up:
# apache vhost/wsgi files, the backend database, the conf dir and
# (when not under mod_wsgi) the API log dir.
function cleanup_ceilometer {
    _ceilometer_cleanup_apache_wsgi
    _drop_database
    sudo rm -f "$CEILOMETER_CONF_DIR"/*
    sudo rmdir "$CEILOMETER_CONF_DIR"

    if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then
        sudo rm -f "$CEILOMETER_API_LOG_DIR"/*
        sudo rmdir "$CEILOMETER_API_LOG_DIR"
    fi
}
# Write the [cache] section configuration for the configured backend.
# NOTE(cdent): This currently only works for redis. Still working
# out how to express the other backends.
function _ceilometer_configure_cache_backend {
    local conf=$CEILOMETER_CONF
    iniset $conf cache backend $CEILOMETER_CACHE_BACKEND
    iniset $conf cache backend_argument url:$CEILOMETER_CACHE_URL
    iniadd_literal $conf cache backend_argument distributed_lock:True
    # Extra arguments that only apply to the redis driver.
    if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then
        iniadd_literal $conf cache backend_argument db:0
        iniadd_literal $conf cache backend_argument redis_expiration_time:600
    fi
}
# Set configuration for storage backend. Writes the database connection
# options (or the gnocchi dispatcher options) matching
# CEILOMETER_BACKEND, then drops any stale data via _drop_database.
# Dies on an unknown backend value.
function _ceilometer_configure_storage_backend {
    if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
        iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
        iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
    elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
        # es is only supported for events. we will use sql for metering.
        iniset $CEILOMETER_CONF database event_connection es://localhost:9200
        iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
        ${TOP_DIR}/pkg/elasticsearch.sh start
    elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
        iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
        iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer
    elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then
        gnocchi_url=$(gnocchi_service_url)
        iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi
        # FIXME(sileht): We shouldn't load event_dispatchers if store_event is False
        iniset $CEILOMETER_CONF DEFAULT event_dispatchers ""
        iniset $CEILOMETER_CONF notification store_events False
        # NOTE(gordc): set higher retry in case gnocchi is started after ceilometer on a slow machine
        iniset $CEILOMETER_CONF storage max_retries 20
        # NOTE(gordc): set batching to better handle recording on a slow machine
        iniset $CEILOMETER_CONF collector batch_size 50
        iniset $CEILOMETER_CONF collector batch_timeout 5
        iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url
        iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY}
        if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
            iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True"
            iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift"
        else
            iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False"
        fi
    else
        die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND"
    fi
    _drop_database
}
# Configure Ceilometer: write ceilometer.conf (messaging, coordination,
# cache, credentials, storage) and install the declarative config files
# into CEILOMETER_CONF_DIR. Runs in the devstack "post-config" phase.
function configure_ceilometer {
    local conffile

    iniset_rpc_backend ceilometer $CEILOMETER_CONF
    iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS"
    iniset $CEILOMETER_CONF DEFAULT verbose True
    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"

    # Workload partitioning requires a tooz coordination backend; an
    # empty CEILOMETER_COORDINATION_URL disables it.
    if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then
        iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL
        iniset $CEILOMETER_CONF compute workload_partitioning True
        iniset $CEILOMETER_CONF notification workload_partitioning True
        iniset $CEILOMETER_CONF notification workers $API_WORKERS
    fi

    if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then
        _ceilometer_configure_cache_backend
    fi

    # Install the policy file and declarative configuration files to
    # the conf dir.
    # NOTE(cdent): Do not make this a glob as it will conflict
    # with rootwrap installation done elsewhere and also clobber
    # ceilometer.conf settings that have already been made.
    # Anyway, explicit is better than implicit.
    for conffile in policy.json api_paste.ini pipeline.yaml \
            event_definitions.yaml event_pipeline.yaml \
            gnocchi_resources.yaml; do
        cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR
    done
    iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json

    if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then
        sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
    fi
    # Add the alarm notifier publisher to the event pipeline (idempotent:
    # only if it is not already present).
    if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then
        if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then
            sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml
        fi
    fi

    # The compute and central agents need these credentials in order to
    # call out to other services' public APIs.
    iniset $CEILOMETER_CONF service_credentials auth_type password
    iniset $CEILOMETER_CONF service_credentials user_domain_id default
    iniset $CEILOMETER_CONF service_credentials project_domain_id default
    iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME
    iniset $CEILOMETER_CONF service_credentials username ceilometer
    iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD
    iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME
    iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI

    configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR

    iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS

    # Configure storage
    if is_service_enabled ceilometer-collector ceilometer-api; then
        _ceilometer_configure_storage_backend
        iniset $CEILOMETER_CONF collector workers $API_WORKERS
    fi

    if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
        iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere
        iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP"
        iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER"
        iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
    fi

    if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
        iniset $CEILOMETER_CONF api pecan_debug "False"
        _ceilometer_config_apache_wsgi
    fi

    if is_service_enabled ceilometer-aipmi; then
        # Configure rootwrap for the ipmi agent
        configure_rootwrap ceilometer
    fi
}
# init_ceilometer() - Initialize keystone accounts, the auth cache dir
# and (for sql/es backends) the database schema.
function init_ceilometer {
    # Get ceilometer keystone settings in place
    _ceilometer_create_accounts
    # Create cache dir
    sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
    rm -f $CEILOMETER_AUTH_CACHE_DIR/*

    if is_service_enabled ceilometer-collector ceilometer-api && is_service_enabled mysql postgresql ; then
        # es also uses sql for metering, so it needs the schema too.
        if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
            recreate_database ceilometer
            $CEILOMETER_BIN_DIR/ceilometer-dbsync
        fi
    fi
}
# Install Ceilometer.
# The storage and coordination backends are installed here because the
# virtualenv context is active at this point and python drivers need to be
# installed. The context is not active during preinstall (when it would
# otherwise makes sense to do the backend services).
function install_ceilometer {
    # Coordination is only needed by the agents that partition work.
    if is_service_enabled ceilometer-acentral ceilometer-anotification ceilometer-alarm-evaluator ; then
        _ceilometer_prepare_coordination
    fi

    if is_service_enabled ceilometer-collector ceilometer-api; then
        _ceilometer_prepare_storage_backend
    fi

    if is_service_enabled ceilometer-acompute ; then
        _ceilometer_prepare_virt_drivers
    fi

    install_ceilometerclient
    setup_develop $CEILOMETER_DIR
    sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR

    # A dedicated log dir is only needed when the API runs standalone.
    if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "False" ]; then
        sudo install -d -o $STACK_USER -m 755 $CEILOMETER_API_LOG_DIR
    fi
}
# install_ceilometerclient() - Collect source and prepare.
# Installs python-ceilometerclient from git when LIBS_FROM_GIT requests
# it (including its bash completion file), otherwise from pip.
function install_ceilometerclient {
    if use_library_from_git "python-ceilometerclient"; then
        git_clone_by_name "python-ceilometerclient"
        setup_dev_lib "python-ceilometerclient"
        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion
    else
        pip_install_gr python-ceilometerclient
    fi
}
# start_ceilometer() - Start running processes, including screen.
# Start order matters: apache (when used) must be up before the
# collector, and the compute agent is started last (see below).
function start_ceilometer {
    run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF"
    run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF"
    run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF"

    if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
        run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
    elif is_service_enabled ceilometer-api; then
        enable_apache_site ceilometer
        restart_apache_server
        tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log
        tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log
    fi

    # run the collector after restarting apache as it needs
    # operational keystone if using gnocchi
    run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"

    # Start the compute agent late to allow time for the collector to
    # fully wake up and connect to the message bus. See bug #1355809
    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
        # Run in the libvirt group so the agent can talk to the hypervisor.
        run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP
    fi
    if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
        run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF"
    fi

    # Only die on API if it was actually intended to be turned on
    if is_service_enabled ceilometer-api; then
        echo "Waiting for ceilometer-api to start..."
        if ! wait_for_service $SERVICE_TIMEOUT $(ceilometer_service_url)/v2/; then
            die $LINENO "ceilometer-api did not start"
        fi
    fi
}
# stop_ceilometer() - Stop all running ceilometer processes.
function stop_ceilometer {
    local agent

    # The API is either served by apache (mod_wsgi) or runs as its own
    # process; take it down the matching way first.
    if is_service_enabled ceilometer-api ; then
        if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
            disable_apache_site ceilometer
            restart_apache_server
        else
            stop_process ceilometer-api
        fi
    fi

    # Kill the remaining agent/collector screen windows
    for agent in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector; do
        stop_process $agent
    done
}
# This is the main for plugin.sh: dispatch on the devstack phase
# arguments ($1 = stack/unstack/clean, $2 = sub-phase within "stack").
if is_service_enabled ceilometer; then
    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
        # Set up other services
        echo_summary "Configuring system services for Ceilometer"
        preinstall_ceilometer
    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing Ceilometer"
        # Use stack_install_service here to account for virtualenv
        stack_install_service ceilometer
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring Ceilometer"
        configure_ceilometer
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        echo_summary "Initializing Ceilometer"
        # Tidy base for ceilometer
        init_ceilometer
        # Start the services
        start_ceilometer
    fi

    if [[ "$1" == "unstack" ]]; then
        echo_summary "Shutting Down Ceilometer"
        stop_ceilometer
    fi

    if [[ "$1" == "clean" ]]; then
        echo_summary "Cleaning Ceilometer"
        cleanup_ceilometer
    fi
fi

# Restore xtrace to the setting saved at the top of the plugin
$XTRACE
ceilometer-6.0.0/devstack/files/ 0000775 0005670 0005671 00000000000 12701406364 017717 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/devstack/files/rpms/ 0000775 0005670 0005671 00000000000 12701406364 020700 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/devstack/files/rpms/ceilometer 0000664 0005670 0005671 00000000030 12701406223 022736 0 ustar jenkins jenkins 0000000 0000000 selinux-policy-targeted
ceilometer-6.0.0/devstack/README.rst 0000664 0005670 0005671 00000000606 12701406223 020300 0 ustar jenkins jenkins 0000000 0000000 ===============================
Enabling Ceilometer in DevStack
===============================
1. Download Devstack::
git clone https://git.openstack.org/openstack-dev/devstack
cd devstack
2. Add this repo as an external repository in ``local.conf`` file::
[[local|localrc]]
enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer
3. Run ``stack.sh``.
ceilometer-6.0.0/etc/ 0000775 0005670 0005671 00000000000 12701406364 015564 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/etc/ceilometer/ 0000775 0005670 0005671 00000000000 12701406364 017714 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/etc/ceilometer/event_definitions.yaml 0000664 0005670 0005671 00000035505 12701406224 024317 0 ustar jenkins jenkins 0000000 0000000 ---
- event_type: compute.instance.*
traits: &instance_traits
tenant_id:
fields: payload.tenant_id
user_id:
fields: payload.user_id
instance_id:
fields: payload.instance_id
host:
fields: publisher_id.`split(., 1, 1)`
service:
fields: publisher_id.`split(., 0, -1)`
memory_mb:
type: int
fields: payload.memory_mb
disk_gb:
type: int
fields: payload.disk_gb
root_gb:
type: int
fields: payload.root_gb
ephemeral_gb:
type: int
fields: payload.ephemeral_gb
vcpus:
type: int
fields: payload.vcpus
instance_type_id:
type: int
fields: payload.instance_type_id
instance_type:
fields: payload.instance_type
state:
fields: payload.state
os_architecture:
fields: payload.image_meta.'org.openstack__1__architecture'
os_version:
fields: payload.image_meta.'org.openstack__1__os_version'
os_distro:
fields: payload.image_meta.'org.openstack__1__os_distro'
launched_at:
type: datetime
fields: payload.launched_at
deleted_at:
type: datetime
fields: payload.deleted_at
- event_type: compute.instance.exists
traits:
<<: *instance_traits
audit_period_beginning:
type: datetime
fields: payload.audit_period_beginning
audit_period_ending:
type: datetime
fields: payload.audit_period_ending
- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
traits: &cinder_traits
user_id:
fields: payload.user_id
project_id:
fields: payload.tenant_id
availability_zone:
fields: payload.availability_zone
display_name:
fields: payload.display_name
replication_status:
fields: payload.replication_status
status:
fields: payload.status
created_at:
fields: payload.created_at
- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*']
traits:
<<: *cinder_traits
resource_id:
fields: payload.volume_id
host:
fields: payload.host
size:
fields: payload.size
type:
fields: payload.volume_type
replication_status:
fields: payload.replication_status
- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*']
traits:
<<: *cinder_traits
resource_id:
fields: payload.snapshot_id
volume_id:
fields: payload.volume_id
- event_type: ['image_volume_cache.*']
traits:
image_id:
fields: payload.image_id
host:
fields: payload.host
- event_type: ['image.update', 'image.upload', 'image.delete']
traits: &glance_crud
project_id:
fields: payload.owner
resource_id:
fields: payload.id
name:
fields: payload.name
status:
fields: payload.status
created_at:
fields: payload.created_at
user_id:
fields: payload.owner
deleted_at:
fields: payload.deleted_at
size:
fields: payload.size
- event_type: image.send
traits: &glance_send
receiver_project:
fields: payload.receiver_tenant_id
receiver_user:
fields: payload.receiver_user_id
user_id:
fields: payload.owner_id
image_id:
fields: payload.image_id
destination_ip:
fields: payload.destination_ip
bytes_sent:
fields: payload.bytes_sent
- event_type: orchestration.stack.*
traits: &orchestration_crud
project_id:
fields: payload.tenant_id
user_id:
fields: ['_context_trustor_user_id', '_context_user_id']
resource_id:
fields: payload.stack_identity
- event_type: sahara.cluster.*
traits: &sahara_crud
project_id:
fields: payload.project_id
user_id:
fields: _context_user_id
resource_id:
fields: payload.cluster_id
- event_type: sahara.cluster.health
traits: &sahara_health
<<: *sahara_crud
verification_id:
fields: payload.verification_id
health_check_status:
fields: payload.health_check_status
health_check_name:
fields: payload.health_check_name
health_check_description:
fields: payload.health_check_description
created_at:
type: datetime
fields: payload.created_at
updated_at:
type: datetime
fields: payload.updated_at
- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*',
'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*']
traits: &identity_crud
resource_id:
fields: payload.resource_info
initiator_id:
fields: payload.initiator.id
project_id:
fields: payload.initiator.project_id
domain_id:
fields: payload.initiator.domain_id
- event_type: identity.role_assignment.*
traits: &identity_role_assignment
role:
fields: payload.role
group:
fields: payload.group
domain:
fields: payload.domain
user:
fields: payload.user
project:
fields: payload.project
- event_type: identity.authenticate
traits: &identity_authenticate
typeURI:
fields: payload.typeURI
id:
fields: payload.id
action:
fields: payload.action
eventType:
fields: payload.eventType
eventTime:
fields: payload.eventTime
outcome:
fields: payload.outcome
initiator_typeURI:
fields: payload.initiator.typeURI
initiator_id:
fields: payload.initiator.id
initiator_name:
fields: payload.initiator.name
initiator_host_agent:
fields: payload.initiator.host.agent
initiator_host_addr:
fields: payload.initiator.host.address
target_typeURI:
fields: payload.target.typeURI
target_id:
fields: payload.target.id
observer_typeURI:
fields: payload.observer.typeURI
observer_id:
fields: payload.observer.id
- event_type: objectstore.http.request
traits: &objectstore_request
typeURI:
fields: payload.typeURI
id:
fields: payload.id
action:
fields: payload.action
eventType:
fields: payload.eventType
eventTime:
fields: payload.eventTime
outcome:
fields: payload.outcome
initiator_typeURI:
fields: payload.initiator.typeURI
initiator_id:
fields: payload.initiator.id
initiator_project_id:
fields: payload.initiator.project_id
target_typeURI:
fields: payload.target.typeURI
target_id:
fields: payload.target.id
target_action:
fields: payload.target.action
target_metadata_path:
fields: payload.target.metadata.path
target_metadata_version:
fields: payload.target.metadata.version
target_metadata_container:
fields: payload.target.metadata.container
target_metadata_object:
fields: payload.target.metadata.object
observer_id:
fields: payload.observer.id
- event_type: magnetodb.table.*
traits: &kv_store
resource_id:
fields: payload.table_uuid
user_id:
fields: _context_user_id
project_id:
fields: _context_tenant
- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*']
traits: &network_traits
user_id:
fields: _context_user_id
project_id:
fields: _context_tenant_id
- event_type: network.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.network.id', 'payload.id']
- event_type: subnet.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.subnet.id', 'payload.id']
- event_type: port.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.port.id', 'payload.id']
- event_type: router.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.router.id', 'payload.id']
- event_type: floatingip.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.floatingip.id', 'payload.id']
- event_type: pool.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.pool.id', 'payload.id']
- event_type: vip.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.vip.id', 'payload.id']
- event_type: member.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.member.id', 'payload.id']
- event_type: health_monitor.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.health_monitor.id', 'payload.id']
- event_type: healthmonitor.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.healthmonitor.id', 'payload.id']
- event_type: listener.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.listener.id', 'payload.id']
- event_type: loadbalancer.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.loadbalancer.id', 'payload.id']
- event_type: firewall.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.firewall.id', 'payload.id']
- event_type: firewall_policy.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.firewall_policy.id', 'payload.id']
- event_type: firewall_rule.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.firewall_rule.id', 'payload.id']
- event_type: vpnservice.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.vpnservice.id', 'payload.id']
- event_type: ipsecpolicy.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.ipsecpolicy.id', 'payload.id']
- event_type: ikepolicy.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.ikepolicy.id', 'payload.id']
- event_type: ipsec_site_connection.*
traits:
<<: *network_traits
resource_id:
fields: ['payload.ipsec_site_connection.id', 'payload.id']
- event_type: '*http.*'
traits: &http_audit
project_id:
fields: payload.initiator.project_id
user_id:
fields: payload.initiator.id
typeURI:
fields: payload.typeURI
eventType:
fields: payload.eventType
action:
fields: payload.action
outcome:
fields: payload.outcome
id:
fields: payload.id
eventTime:
fields: payload.eventTime
requestPath:
fields: payload.requestPath
observer_id:
fields: payload.observer.id
target_id:
fields: payload.target.id
target_typeURI:
fields: payload.target.typeURI
target_name:
fields: payload.target.name
initiator_typeURI:
fields: payload.initiator.typeURI
initiator_id:
fields: payload.initiator.id
initiator_name:
fields: payload.initiator.name
initiator_host_address:
fields: payload.initiator.host.address
- event_type: '*http.response'
traits:
<<: *http_audit
reason_code:
fields: payload.reason.reasonCode
- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete']
traits: &dns_domain_traits
status:
fields: payload.status
retry:
fields: payload.retry
description:
fields: payload.description
expire:
fields: payload.expire
email:
fields: payload.email
ttl:
fields: payload.ttl
action:
fields: payload.action
name:
fields: payload.name
resource_id:
fields: payload.id
created_at:
fields: payload.created_at
updated_at:
fields: payload.updated_at
version:
fields: payload.version
      # NOTE(review): every sibling trait here reads from 'payload.*'; this one
      # lacks the prefix — presumably should be payload.parent_domain_id. Verify
      # against the Designate notification payload before changing.
      parent_domain_id:
        fields: parent_domain_id
serial:
fields: payload.serial
- event_type: dns.domain.exists
traits:
<<: *dns_domain_traits
audit_period_beginning:
type: datetime
fields: payload.audit_period_beginning
audit_period_ending:
type: datetime
fields: payload.audit_period_ending
- event_type: trove.*
traits: &trove_base_traits
state:
fields: payload.state_description
instance_type:
fields: payload.instance_type
user_id:
fields: payload.user_id
resource_id:
fields: payload.instance_id
instance_type_id:
fields: payload.instance_type_id
launched_at:
type: datetime
fields: payload.launched_at
instance_name:
fields: payload.instance_name
      # NOTE(review): duplicate mapping key — 'state' is already defined above
      # (fields: payload.state_description). YAML parsers keep only the last
      # occurrence, so this payload.state mapping silently wins. Confirm which
      # of the two is intended; the earlier one is currently dead.
      state:
        fields: payload.state
nova_instance_id:
fields: payload.nova_instance_id
service_id:
fields: payload.service_id
created_at:
type: datetime
fields: payload.created_at
region:
fields: payload.region
- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete']
traits: &trove_common_traits
name:
fields: payload.name
availability_zone:
fields: payload.availability_zone
instance_size:
type: int
fields: payload.instance_size
volume_size:
type: int
fields: payload.volume_size
nova_volume_id:
fields: payload.nova_volume_id
- event_type: trove.instance.create
traits:
<<: [*trove_base_traits, *trove_common_traits]
- event_type: trove.instance.modify_volume
traits:
<<: [*trove_base_traits, *trove_common_traits]
old_volume_size:
type: int
fields: payload.old_volume_size
modify_at:
type: datetime
fields: payload.modify_at
- event_type: trove.instance.modify_flavor
traits:
<<: [*trove_base_traits, *trove_common_traits]
old_instance_size:
type: int
fields: payload.old_instance_size
modify_at:
type: datetime
fields: payload.modify_at
- event_type: trove.instance.delete
traits:
<<: [*trove_base_traits, *trove_common_traits]
deleted_at:
type: datetime
fields: payload.deleted_at
- event_type: trove.instance.exists
traits:
<<: *trove_base_traits
display_name:
fields: payload.display_name
audit_period_beginning:
type: datetime
fields: payload.audit_period_beginning
audit_period_ending:
type: datetime
fields: payload.audit_period_ending
- event_type: profiler.*
traits:
project:
fields: payload.project
service:
fields: payload.service
name:
fields: payload.name
base_id:
fields: payload.base_id
trace_id:
fields: payload.trace_id
parent_id:
fields: payload.parent_id
timestamp:
fields: payload.timestamp
host:
fields: payload.info.host
path:
fields: payload.info.request.path
query:
fields: payload.info.request.query
method:
fields: payload.info.request.method
scheme:
fields: payload.info.request.scheme
db.statement:
fields: payload.info.db.statement
db.params:
fields: payload.info.db.params
ceilometer-6.0.0/etc/ceilometer/gnocchi_resources.yaml 0000664 0005670 0005671 00000013705 12701406223 024304 0 ustar jenkins jenkins 0000000 0000000 ---
resources:
- resource_type: identity
archive_policy: low
metrics:
- 'identity.authenticate.success'
- 'identity.authenticate.pending'
- 'identity.authenticate.failure'
- 'identity.user.created'
- 'identity.user.deleted'
- 'identity.user.updated'
- 'identity.group.created'
- 'identity.group.deleted'
- 'identity.group.updated'
- 'identity.role.created'
- 'identity.role.deleted'
- 'identity.role.updated'
- 'identity.project.created'
- 'identity.project.deleted'
- 'identity.project.updated'
- 'identity.trust.created'
- 'identity.trust.deleted'
- 'identity.role_assignment.created'
- 'identity.role_assignment.deleted'
- resource_type: ceph_account
metrics:
- 'radosgw.objects'
- 'radosgw.objects.size'
- 'radosgw.objects.containers'
- 'radosgw.api.request'
- 'radosgw.containers.objects'
- 'radosgw.containers.objects.size'
- resource_type: instance
metrics:
- 'instance'
- 'memory'
- 'memory.usage'
- 'memory.resident'
- 'vcpus'
- 'cpu'
- 'cpu.delta'
- 'cpu_util'
- 'disk.root.size'
- 'disk.ephemeral.size'
- 'disk.read.requests'
- 'disk.read.requests.rate'
- 'disk.write.requests'
- 'disk.write.requests.rate'
- 'disk.read.bytes'
- 'disk.read.bytes.rate'
- 'disk.write.bytes'
- 'disk.write.bytes.rate'
- 'disk.latency'
- 'disk.iops'
- 'disk.capacity'
- 'disk.allocation'
- 'disk.usage'
attributes:
host: resource_metadata.host
image_ref: resource_metadata.image_ref
display_name: resource_metadata.display_name
flavor_id: resource_metadata.(instance_flavor_id|(flavor.id))
server_group: resource_metadata.user_metadata.server_group
- resource_type: instance_network_interface
metrics:
- 'network.outgoing.packets.rate'
- 'network.incoming.packets.rate'
- 'network.outgoing.packets'
- 'network.incoming.packets'
- 'network.outgoing.bytes.rate'
- 'network.incoming.bytes.rate'
- 'network.outgoing.bytes'
- 'network.incoming.bytes'
attributes:
name: resource_metadata.vnic_name
instance_id: resource_metadata.instance_id
- resource_type: instance_disk
metrics:
- 'disk.device.read.requests'
- 'disk.device.read.requests.rate'
- 'disk.device.write.requests'
- 'disk.device.write.requests.rate'
- 'disk.device.read.bytes'
- 'disk.device.read.bytes.rate'
- 'disk.device.write.bytes'
- 'disk.device.write.bytes.rate'
- 'disk.device.latency'
- 'disk.device.iops'
- 'disk.device.capacity'
- 'disk.device.allocation'
- 'disk.device.usage'
attributes:
name: resource_metadata.disk_name
instance_id: resource_metadata.instance_id
- resource_type: image
metrics:
- 'image'
- 'image.size'
- 'image.download'
- 'image.serve'
attributes:
name: resource_metadata.name
container_format: resource_metadata.container_format
disk_format: resource_metadata.disk_format
- resource_type: ipmi
metrics:
- 'hardware.ipmi.node.power'
- 'hardware.ipmi.node.temperature'
- 'hardware.ipmi.node.inlet_temperature'
- 'hardware.ipmi.node.outlet_temperature'
- 'hardware.ipmi.node.fan'
- 'hardware.ipmi.node.current'
- 'hardware.ipmi.node.voltage'
- 'hardware.ipmi.node.airflow'
- 'hardware.ipmi.node.cups'
- 'hardware.ipmi.node.cpu_util'
- 'hardware.ipmi.node.mem_util'
- 'hardware.ipmi.node.io_util'
- resource_type: network
metrics:
- 'bandwidth'
- 'network'
- 'network.create'
- 'network.update'
- 'subnet'
- 'subnet.create'
- 'subnet.update'
- 'port'
- 'port.create'
- 'port.update'
- 'router'
- 'router.create'
- 'router.update'
- 'ip.floating'
- 'ip.floating.create'
- 'ip.floating.update'
- resource_type: stack
metrics:
- 'stack.create'
- 'stack.update'
- 'stack.delete'
- 'stack.resume'
- 'stack.suspend'
- resource_type: swift_account
metrics:
- 'storage.objects.incoming.bytes'
- 'storage.objects.outgoing.bytes'
- 'storage.api.request'
- 'storage.objects.size'
- 'storage.objects'
- 'storage.objects.containers'
- 'storage.containers.objects'
- 'storage.containers.objects.size'
- resource_type: volume
metrics:
- 'volume'
- 'volume.size'
- 'volume.create'
- 'volume.delete'
- 'volume.update'
- 'volume.resize'
- 'volume.attach'
- 'volume.detach'
attributes:
display_name: resource_metadata.display_name
- resource_type: host
metrics:
- 'hardware.cpu.load.1min'
- 'hardware.cpu.load.5min'
- 'hardware.cpu.load.15min'
- 'hardware.cpu.util'
- 'hardware.memory.total'
- 'hardware.memory.used'
- 'hardware.memory.swap.total'
- 'hardware.memory.swap.avail'
- 'hardware.memory.buffer'
- 'hardware.memory.cached'
- 'hardware.network.ip.outgoing.datagrams'
- 'hardware.network.ip.incoming.datagrams'
- 'hardware.system_stats.cpu.idle'
- 'hardware.system_stats.io.outgoing.blocks'
- 'hardware.system_stats.io.incoming.blocks'
attributes:
host_name: resource_metadata.resource_url
- resource_type: host_disk
metrics:
- 'hardware.disk.size.total'
- 'hardware.disk.size.used'
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.device
- resource_type: host_network_interface
metrics:
- 'hardware.network.incoming.bytes'
- 'hardware.network.outgoing.bytes'
- 'hardware.network.outgoing.errors'
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.name
ceilometer-6.0.0/etc/ceilometer/rootwrap.d/ 0000775 0005670 0005671 00000000000 12701406364 022013 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/etc/ceilometer/rootwrap.d/ipmi.filters 0000664 0005670 0005671 00000000360 12701406223 024334 0 ustar jenkins jenkins 0000000 0000000 # ceilometer-rootwrap command filters for IPMI capable nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool'
ipmitool: CommandFilter, ipmitool, root
ceilometer-6.0.0/etc/ceilometer/rootwrap.conf 0000664 0005670 0005671 00000001727 12701406223 022441 0 ustar jenkins jenkins 0000000 0000000 # Configuration for ceilometer-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
ceilometer-6.0.0/etc/ceilometer/policy.json 0000664 0005670 0005671 00000000660 12701406223 022102 0 ustar jenkins jenkins 0000000 0000000 {
"context_is_admin": "role:admin",
"segregation": "rule:context_is_admin",
"telemetry:get_samples": "",
"telemetry:get_sample": "",
"telemetry:query_sample": "",
"telemetry:create_samples": "",
"telemetry:compute_statistics": "",
"telemetry:get_meters": "",
"telemetry:get_resource": "",
"telemetry:get_resources": "",
"telemetry:events:index": "",
"telemetry:events:show": ""
}
ceilometer-6.0.0/etc/ceilometer/examples/ 0000775 0005670 0005671 00000000000 12701406364 021532 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml 0000664 0005670 0005671 00000021375 12701406223 031225 0 ustar jenkins jenkins 0000000 0000000 metric:
# LBaaS V2
- name: "loadbalancer.create"
event_type:
- "loadbalancer.create.end"
type: "delta"
unit: "loadbalancer"
volume: 1
resource_id: $.payload.loadbalancer.id
project_id: $.payload.loadbalancer.tenant_id
metadata:
name: $.payload.loadbalancer.name
description: $.payload.loadbalancer.description
listeners: $.payload.loadbalancer.listeners
operating_status: $.payload.loadbalancer.operating_status
vip_address: $.payload.loadbalancer.vip_address
vip_subnet_id: $.payload.loadbalancer.vip_subnet_id
admin_state_up: $.payload.loadbalancer.admin_state_up
- name: "loadbalancer.update"
event_type:
- "loadbalancer.update.end"
type: "delta"
unit: "loadbalancer"
volume: 1
resource_id: $.payload.loadbalancer.id
project_id: $.payload.loadbalancer.tenant_id
metadata:
name: $.payload.loadbalancer.name
description: $.payload.loadbalancer.description
listeners: $.payload.loadbalancer.listeners
operating_status: $.payload.loadbalancer.operating_status
vip_address: $.payload.loadbalancer.vip_address
vip_subnet_id: $.payload.loadbalancer.vip_subnet_id
admin_state_up: $.payload.loadbalancer.admin_state_up
- name: "loadbalancer.delete"
event_type:
- "loadbalancer.delete.end"
type: "delta"
unit: "loadbalancer"
volume: 1
resource_id: $.payload.loadbalancer.id
project_id: $.payload.loadbalancer.tenant_id
metadata:
name: $.payload.loadbalancer.name
description: $.payload.loadbalancer.description
listeners: $.payload.loadbalancer.listeners
operating_status: $.payload.loadbalancer.operating_status
vip_address: $.payload.loadbalancer.vip_address
vip_subnet_id: $.payload.loadbalancer.vip_subnet_id
admin_state_up: $.payload.loadbalancer.admin_state_up
- name: "listener.create"
event_type:
- "listener.create.end"
type: "delta"
unit: "listener"
volume: 1
resource_id: $.payload.listener.id
project_id: $.payload.listener.tenant_id
metadata:
name: $.payload.listener.name
description: $.payload.listener.description
admin_state_up: $.payload.listener.admin_state_up
loadbalancers: $.payload.listener.loadbalancers
default_pool_id: $.payload.listener.default_pool_id
protocol: $.payload.listener.protocol
connection_limit: $.payload.listener.connection_limit
- name: "listener.update"
event_type:
- "listener.update.end"
type: "delta"
unit: "listener"
volume: 1
resource_id: $.payload.listener.id
project_id: $.payload.listener.tenant_id
metadata:
name: $.payload.listener.name
description: $.payload.listener.description
admin_state_up: $.payload.listener.admin_state_up
loadbalancers: $.payload.listener.loadbalancers
default_pool_id: $.payload.listener.default_pool_id
protocol: $.payload.listener.protocol
connection_limit: $.payload.listener.connection_limit
- name: "listener.delete"
event_type:
- "listener.delete.end"
type: "delta"
unit: "listener"
volume: 1
resource_id: $.payload.listener.id
project_id: $.payload.listener.tenant_id
metadata:
name: $.payload.listener.name
description: $.payload.listener.description
admin_state_up: $.payload.listener.admin_state_up
loadbalancers: $.payload.listener.loadbalancers
default_pool_id: $.payload.listener.default_pool_id
protocol: $.payload.listener.protocol
connection_limit: $.payload.listener.connection_limit
- name: "healthmonitor.create"
event_type:
- "healthmonitor.create.end"
type: "delta"
unit: "healthmonitor"
volume: 1
resource_id: $.payload.healthmonitor.id
project_id: $.payload.healthmonitor.tenant_id
metadata:
name: $.payload.healthmonitor.name
description: $.payload.healthmonitor.description
admin_state_up: $.payload.healthmonitor.admin_state_up
max_retries: $.payload.healthmonitor.max_retries
delay: $.payload.healthmonitor.delay
timeout: $.payload.healthmonitor.timeout
pools: $.payload.healthmonitor.pools
type: $.payload.healthmonitor.type
- name: "healthmonitor.update"
event_type:
- "healthmonitor.update.end"
type: "delta"
unit: "healthmonitor"
volume: 1
resource_id: $.payload.healthmonitor.id
project_id: $.payload.healthmonitor.tenant_id
metadata:
name: $.payload.healthmonitor.name
description: $.payload.healthmonitor.description
admin_state_up: $.payload.healthmonitor.admin_state_up
max_retries: $.payload.healthmonitor.max_retries
delay: $.payload.healthmonitor.delay
timeout: $.payload.healthmonitor.timeout
pools: $.payload.healthmonitor.pools
type: $.payload.healthmonitor.type
- name: "healthmonitor.delete"
event_type:
- "healthmonitor.delete.end"
type: "delta"
unit: "healthmonitor"
volume: 1
resource_id: $.payload.healthmonitor.id
project_id: $.payload.healthmonitor.tenant_id
metadata:
name: $.payload.healthmonitor.name
description: $.payload.healthmonitor.description
admin_state_up: $.payload.healthmonitor.admin_state_up
max_retries: $.payload.healthmonitor.max_retries
delay: $.payload.healthmonitor.delay
timeout: $.payload.healthmonitor.timeout
pools: $.payload.healthmonitor.pools
type: $.payload.healthmonitor.type
- name: "pool.create"
event_type:
- "pool.create.end"
type: "delta"
unit: "pool"
volume: 1
resource_id: $.payload.pool.id
project_id: $.payload.pool.tenant_id
metadata:
name: $.payload.pool.name
description: $.payload.pool.description
admin_state_up: $.payload.pool.admin_state_up
lb_method: $.payload.pool.lb_method
protocol: $.payload.pool.protocol
subnet_id: $.payload.pool.subnet_id
vip_id: $.payload.pool.vip_id
status: $.payload.pool.status
status_description: $.payload.pool.status_description
- name: "pool.update"
event_type:
- "pool.update.end"
type: "delta"
unit: "pool"
volume: 1
resource_id: $.payload.pool.id
project_id: $.payload.pool.tenant_id
metadata:
name: $.payload.pool.name
description: $.payload.pool.description
admin_state_up: $.payload.pool.admin_state_up
lb_method: $.payload.pool.lb_method
protocol: $.payload.pool.protocol
subnet_id: $.payload.pool.subnet_id
vip_id: $.payload.pool.vip_id
status: $.payload.pool.status
status_description: $.payload.pool.status_description
- name: "pool.delete"
event_type:
- "pool.delete.end"
type: "delta"
unit: "pool"
volume: 1
resource_id: $.payload.pool.id
project_id: $.payload.pool.tenant_id
metadata:
name: $.payload.pool.name
description: $.payload.pool.description
admin_state_up: $.payload.pool.admin_state_up
lb_method: $.payload.pool.lb_method
protocol: $.payload.pool.protocol
subnet_id: $.payload.pool.subnet_id
vip_id: $.payload.pool.vip_id
status: $.payload.pool.status
status_description: $.payload.pool.status_description
- name: "member.create"
event_type:
- "member.create.end"
type: "delta"
unit: "member"
volume: 1
resource_id: $.payload.member.id
project_id: $.payload.member.tenant_id
metadata:
address: $.payload.member.address
status: $.payload.member.status
status_description: $.payload.member.status_description
weight: $.payload.member.weight
admin_state_up: $.payload.member.admin_state_up
protocol_port: $.payload.member.protocol_port
pool_id: $.payload.member.pool_id
- name: "member.update"
event_type:
- "member.update.end"
type: "delta"
unit: "member"
volume: 1
resource_id: $.payload.member.id
project_id: $.payload.member.tenant_id
metadata:
address: $.payload.member.address
status: $.payload.member.status
status_description: $.payload.member.status_description
weight: $.payload.member.weight
admin_state_up: $.payload.member.admin_state_up
protocol_port: $.payload.member.protocol_port
pool_id: $.payload.member.pool_id
- name: "member.delete"
event_type:
- "member.delete.end"
type: "delta"
unit: "member"
volume: 1
resource_id: $.payload.member.id
project_id: $.payload.member.tenant_id
metadata:
address: $.payload.member.address
status: $.payload.member.status
status_description: $.payload.member.status_description
weight: $.payload.member.weight
admin_state_up: $.payload.member.admin_state_up
protocol_port: $.payload.member.protocol_port
pool_id: $.payload.member.pool_id
ceilometer-6.0.0/etc/ceilometer/examples/osprofiler_event_definitions.yaml 0000664 0005670 0005671 00000001302 12701406223 030364 0 ustar jenkins jenkins 0000000 0000000 ---
- event_type: profiler.*
traits:
project:
fields: payload.project
service:
fields: payload.service
name:
fields: payload.name
base_id:
fields: payload.base_id
trace_id:
fields: payload.trace_id
parent_id:
fields: payload.parent_id
timestamp:
fields: payload.timestamp
host:
fields: payload.info.host
path:
fields: payload.info.request.path
query:
fields: payload.info.request.query
method:
fields: payload.info.request.method
scheme:
fields: payload.info.request.scheme
db.statement:
fields: payload.info.db.statement
db.params:
fields: payload.info.db.params
ceilometer-6.0.0/etc/ceilometer/ceilometer-config-generator.conf 0000664 0005670 0005671 00000000503 12701406223 026132 0 ustar jenkins jenkins 0000000 0000000 [DEFAULT]
output_file = etc/ceilometer/ceilometer.conf
wrap_width = 79
namespace = ceilometer
namespace = oslo.concurrency
namespace = oslo.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.middleware.cors
namespace = oslo.policy
namespace = oslo.service.service
namespace = keystonemiddleware.auth_token
ceilometer-6.0.0/etc/ceilometer/README-ceilometer.conf.txt 0000664 0005670 0005671 00000000200 12701406223 024446 0 ustar jenkins jenkins 0000000 0000000 To generate the sample ceilometer.conf file, run the following
command from the top-level ceilometer directory:
tox -egenconfig ceilometer-6.0.0/etc/ceilometer/event_pipeline.yaml 0000664 0005670 0005671 00000000324 12701406224 023600 0 ustar jenkins jenkins 0000000 0000000 ---
sources:
- name: event_source
events:
- "*"
sinks:
- event_sink
sinks:
- name: event_sink
transformers:
triggers:
publishers:
- notifier://
ceilometer-6.0.0/etc/ceilometer/pipeline.yaml 0000664 0005670 0005671 00000005003 12701406223 022375 0 ustar jenkins jenkins 0000000 0000000 ---
sources:
- name: meter_source
interval: 600
meters:
- "*"
sinks:
- meter_sink
- name: cpu_source
interval: 600
meters:
- "cpu"
sinks:
- cpu_sink
- cpu_delta_sink
- name: disk_source
interval: 600
meters:
- "disk.read.bytes"
- "disk.read.requests"
- "disk.write.bytes"
- "disk.write.requests"
- "disk.device.read.bytes"
- "disk.device.read.requests"
- "disk.device.write.bytes"
- "disk.device.write.requests"
sinks:
- disk_sink
- name: network_source
interval: 600
meters:
- "network.incoming.bytes"
- "network.incoming.packets"
- "network.outgoing.bytes"
- "network.outgoing.packets"
sinks:
- network_sink
sinks:
- name: meter_sink
transformers:
publishers:
- notifier://
- name: cpu_sink
transformers:
- name: "rate_of_change"
parameters:
target:
name: "cpu_util"
unit: "%"
type: "gauge"
scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
publishers:
- notifier://
- name: cpu_delta_sink
transformers:
- name: "delta"
parameters:
target:
name: "cpu.delta"
growth_only: True
publishers:
- notifier://
- name: disk_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)"
unit: "(B|request)"
target:
map_to:
name: "\\1.\\2.\\3.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- notifier://
- name: network_sink
transformers:
- name: "rate_of_change"
parameters:
source:
map_from:
name: "network\\.(incoming|outgoing)\\.(bytes|packets)"
unit: "(B|packet)"
target:
map_to:
name: "network.\\1.\\2.rate"
unit: "\\1/s"
type: "gauge"
publishers:
- notifier://
ceilometer-6.0.0/etc/ceilometer/api_paste.ini 0000664 0005670 0005671 00000001304 12701406223 022352 0 ustar jenkins jenkins 0000000 0000000 # Ceilometer API WSGI Pipeline
# Define the filters that make up the pipeline for processing WSGI requests
# Note: This pipeline is PasteDeploy's term rather than Ceilometer's pipeline
# used for processing samples
# Remove authtoken from the pipeline if you don't want to use keystone authentication
[pipeline:main]
pipeline = cors request_id authtoken api-server
[app:api-server]
paste.app_factory = ceilometer.api.app:app_factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = ceilometer
ceilometer-6.0.0/etc/apache2/ 0000775 0005670 0005671 00000000000 12701406364 017067 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/etc/apache2/ceilometer 0000664 0005670 0005671 00000002642 12701406223 021140 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is an example Apache2 configuration file for using the
# ceilometer API through mod_wsgi.
# Note: If you are using a Debian-based system then the paths
# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead
# of "httpd".
#
# The number of processes and threads is an example only and should
# be adjusted according to local requirements.
Listen 8777
WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
WSGIProcessGroup ceilometer-api
WSGIScriptAlias / /var/www/ceilometer/app
WSGIApplicationGroup %{GLOBAL}
= 2.4>
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/ceilometer_error.log
CustomLog /var/log/httpd/ceilometer_access.log combined
WSGISocketPrefix /var/run/httpd
ceilometer-6.0.0/babel.cfg 0000664 0005670 0005671 00000000021 12701406223 016522 0 ustar jenkins jenkins 0000000 0000000 [python: **.py]
ceilometer-6.0.0/.coveragerc 0000664 0005670 0005671 00000000142 12701406223 017121 0 ustar jenkins jenkins 0000000 0000000 [run]
branch = True
source = ceilometer
omit = ceilometer/tests/*
[report]
ignore_errors = True
ceilometer-6.0.0/MAINTAINERS 0000664 0005670 0005671 00000000714 12701406223 016502 0 ustar jenkins jenkins 0000000 0000000 = Generalist Code Reviewers =
The current members of ceilometer-core are listed here:
https://launchpad.net/~ceilometer-drivers/+members#active
This group can +2 and approve patches in Ceilometer. However, they may
choose to seek feedback from the appropriate specialist maintainer before
approving a patch if it is in any way controversial or risky.
= IRC handles of maintainers =
cdent
gordc
ildikov
jd__
liusheng
llu
_nadya_
pradk
rohit_
sileht
zqfan
ceilometer-6.0.0/setup.cfg 0000664 0005670 0005671 00000040435 12701406364 016640 0 ustar jenkins jenkins 0000000 0000000 [metadata]
name = ceilometer
summary = OpenStack Telemetry
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://docs.openstack.org/developer/ceilometer/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Topic :: System :: Monitoring
[global]
setup-hooks =
pbr.hooks.setup_hook
[files]
packages =
ceilometer
[entry_points]
ceilometer.notification =
instance = ceilometer.compute.notifications.instance:Instance
instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled
network = ceilometer.network.notifications:Network
subnet = ceilometer.network.notifications:Subnet
port = ceilometer.network.notifications:Port
router = ceilometer.network.notifications:Router
floatingip = ceilometer.network.notifications:FloatingIP
http.request = ceilometer.middleware:HTTPRequest
http.response = ceilometer.middleware:HTTPResponse
hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification
hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification
hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification
hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification
network.services.lb.pool = ceilometer.network.notifications:Pool
network.services.lb.vip = ceilometer.network.notifications:Vip
network.services.lb.member = ceilometer.network.notifications:Member
network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor
network.services.firewall = ceilometer.network.notifications:Firewall
network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy
network.services.firewall.rule = ceilometer.network.notifications:FirewallRule
network.services.vpn = ceilometer.network.notifications:VPNService
network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy
network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy
network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection
_sample = ceilometer.telemetry.notifications:TelemetryIpc
meter = ceilometer.meter.notifications:ProcessMeterNotifications
ceilometer.discover =
local_instances = ceilometer.compute.discovery:InstanceDiscovery
endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
tenant = ceilometer.agent.discovery.tenant:TenantDiscovery
local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery
lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery
lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery
lb_members = ceilometer.network.services.discovery:LBMembersDiscovery
lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery
lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery
lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery
vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery
ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
fw_services = ceilometer.network.services.discovery:FirewallDiscovery
fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
ceilometer.poll.compute =
disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster
disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster
disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster
disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster
disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster
disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster
disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster
disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster
disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster
disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster
disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster
disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster
disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster
disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster
disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster
disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
cpu = ceilometer.compute.pollsters.cpu:CPUPollster
cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster
network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster
network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster
network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster
network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
instance = ceilometer.compute.pollsters.instance:InstancePollster
memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster
disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster
disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster
disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster
disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster
ceilometer.poll.ipmi =
hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster
hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster
hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster
hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster
hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster
hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster
hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster
hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster
hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster
hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster
hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster
hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster
ceilometer.poll.central =
ip.floating = ceilometer.network.floatingip:FloatingIPPollster
image = ceilometer.image.glance:ImagePollster
image.size = ceilometer.image.glance:ImageSizePollster
rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster
rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster
rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster
rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster
rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster
rgw.usage = ceilometer.objectstore.rgw:UsagePollster
storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster
storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster
storage.objects = ceilometer.objectstore.swift:ObjectsPollster
storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster
storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster
energy = ceilometer.energy.kwapi:EnergyPollster
power = ceilometer.energy.kwapi:PowerPollster
switch.port = ceilometer.network.statistics.port:PortPollster
switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets
switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets
switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes
switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes
switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops
switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops
switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors
switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors
switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors
switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors
switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors
switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount
switch.table = ceilometer.network.statistics.table:TablePollster
switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries
switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets
switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets
switch = ceilometer.network.statistics.switch:SWPollster
switch.flow = ceilometer.network.statistics.flow:FlowPollster
switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes
switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds
switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds
switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets
network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster
network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster
network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster
network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster
network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster
network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster
network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster
network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster
network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster
network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster
network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster
network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster
network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster
network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster
ceilometer.builder.poll.central =
hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
ceilometer.event.storage =
es = ceilometer.event.storage.impl_elasticsearch:Connection
log = ceilometer.event.storage.impl_log:Connection
mongodb = ceilometer.event.storage.impl_mongodb:Connection
mysql = ceilometer.event.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.event.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
hbase = ceilometer.event.storage.impl_hbase:Connection
db2 = ceilometer.event.storage.impl_db2:Connection
ceilometer.metering.storage =
log = ceilometer.storage.impl_log:Connection
mongodb = ceilometer.storage.impl_mongodb:Connection
mysql = ceilometer.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.storage.impl_sqlalchemy:Connection
hbase = ceilometer.storage.impl_hbase:Connection
db2 = ceilometer.storage.impl_db2:Connection
ceilometer.compute.virt =
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector
ceilometer.hardware.inspectors =
snmp = ceilometer.hardware.inspector.snmp:SNMPInspector
ceilometer.transformer =
accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
delta = ceilometer.transformer.conversions:DeltaTransformer
unit_conversion = ceilometer.transformer.conversions:ScalingTransformer
rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer
aggregator = ceilometer.transformer.conversions:AggregatorTransformer
arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer
ceilometer.publisher =
test = ceilometer.publisher.test:TestPublisher
notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
udp = ceilometer.publisher.udp:UDPPublisher
file = ceilometer.publisher.file:FilePublisher
direct = ceilometer.publisher.direct:DirectPublisher
kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
ceilometer.event.publisher =
test = ceilometer.publisher.test:TestPublisher
direct = ceilometer.publisher.direct:DirectPublisher
notifier = ceilometer.publisher.messaging:EventNotifierPublisher
kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
ceilometer.event.trait_plugin =
split = ceilometer.event.trait_plugins:SplitterTraitPlugin
bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin
console_scripts =
ceilometer-api = ceilometer.cmd.api:main
ceilometer-polling = ceilometer.cmd.polling:main
ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
ceilometer-send-sample = ceilometer.cmd.sample:send_sample
ceilometer-dbsync = ceilometer.cmd.storage:dbsync
ceilometer-expirer = ceilometer.cmd.storage:expirer
ceilometer-rootwrap = oslo_rootwrap.cmd:main
ceilometer-collector = ceilometer.cmd.collector:main
ceilometer.dispatcher.meter =
database = ceilometer.dispatcher.database:DatabaseDispatcher
file = ceilometer.dispatcher.file:FileDispatcher
http = ceilometer.dispatcher.http:HttpDispatcher
gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher
ceilometer.dispatcher.event =
database = ceilometer.dispatcher.database:DatabaseDispatcher
file = ceilometer.dispatcher.file:FileDispatcher
http = ceilometer.dispatcher.http:HttpDispatcher
network.statistics.drivers =
opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver
opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver
oslo.config.opts =
ceilometer = ceilometer.opts:list_opts
oslo.config.opts.defaults =
ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults
keystoneauth1.plugin =
password-ceilometer-legacy = ceilometer.keystone_client:LegacyCeilometerKeystoneLoader
tempest.test_plugins =
ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[pbr]
warnerrors = true
autodoc_index_modules = true
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = ceilometer/locale/ceilometer.pot
[compile_catalog]
directory = ceilometer/locale
domain = ceilometer
[update_catalog]
domain = ceilometer
output_dir = ceilometer/locale
input_file = ceilometer/locale/ceilometer.pot
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
ceilometer-6.0.0/tools/ 0000775 0005670 0005671 00000000000 12701406364 016151 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/tools/pretty_tox.sh 0000775 0005670 0005671 00000000652 12701406223 020726 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env bash
set -o pipefail

# Arguments forwarded verbatim to testr (test regex, --until-failure, ...).
TESTRARGS=$1

# --until-failure is not compatible with --subunit see:
#
# https://bugs.launchpad.net/testrepository/+bug/1411804
#
# this work around exists until that is addressed
#
# BUG FIX: the original tested the misspelled "$TESTARGS", which is always
# empty, so the non-subunit branch could never be taken.
if [[ "$TESTRARGS" =~ "until-failure" ]]; then
    python setup.py testr --slowest --testr-args="$TESTRARGS"
else
    python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
fi
ceilometer-6.0.0/tools/lintstack.py 0000775 0005670 0005671 00000014334 12701406223 020521 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
# Copyright (c) 2012, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from six.moves import cStringIO as StringIO # noqa
# These variables will be useful if we will need to skip some pylint checks
ignore_codes = []
ignore_messages = []
ignore_modules = []
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
    """One parsed pylint message (file, line number, code, message text).

    The class-level attributes cache the contents of the most recently
    read source file, so consecutive messages about the same file do not
    re-read it from disk.
    """

    _cached_filename = None  # name of the file currently held in the cache
    _cached_content = None   # list of that file's lines

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        # lintoutput is the raw pylint output line this object came from.
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
        """Parse a single line of pylint output into a LintOutput.

        Raises AttributeError (via m.groups()) if the line does not match
        the expected "path:lineno: [code] message" shape.
        """
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        # Only re-read the source file when the filename changes.
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
            cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())

    @classmethod
    def from_msg_to_dict(cls, msg):
        """From the output of pylint msg, to a dict.

        Each key is a unique error identifier, value is a list of LintOutput
        instances sharing that identifier; ignored messages are skipped.
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            if obj.is_ignored():
                continue
            key = obj.key()
            if key not in result:
                result[key] = []
            result[key].append(obj)
        return result

    def is_ignored(self):
        """Return True if this message matches any module-level ignore list."""
        if self.code in ignore_codes:
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        if any(msg in self.message for msg in ignore_messages):
            return True
        return False

    def key(self):
        """Return a (message, source-line) tuple used to de-duplicate errors."""
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
            return self.message, ""
        return self.message, self.line_content.strip()

    def json(self):
        # Serialize every instance attribute as a JSON object.
        return json.dumps(self.__dict__)

    def review_str(self):
        """Render this message in a human-readable, review-friendly form."""
        return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
                "%(code)s: %(message)s" % {
                    "filename": self.filename,
                    "lineno": self.lineno,
                    "line_content": self.line_content,
                    "code": self.code,
                    "message": self.message,
                })
class ErrorKeys(object):
    """Serialize/deserialize the set of known pylint error keys."""

    @classmethod
    def print_json(cls, errors, output=sys.stdout):
        """Write a header plus one JSON line per sorted error key."""
        print("# automatically generated by tools/lintstack.py", file=output)
        for key in sorted(errors.keys()):
            print(json.dumps(key), file=output)

    @classmethod
    def from_file(cls, filename):
        """Read keys written by print_json() back into a set of tuples."""
        keys = set()
        with open(filename) as handle:
            for line in handle:
                # Skip the generated-file header (and any blank artifact).
                if not line or line.startswith("#"):
                    continue
                keys.add(tuple(json.loads(line)))
        return keys
def run_pylint():
    """Run pylint (errors only) over the ceilometer package.

    Returns the captured text output as a single string.

    BUG FIX: the original created ``buff`` but never handed it to
    ``lint.Run``, so ``buff.getvalue()`` was always the empty string and
    validate() vacuously reported zero errors.  Attach a text reporter
    that writes into the buffer instead.
    """
    # Imported locally so the module stays importable for doc tools even
    # if pylint's reporter layout changes; lint is imported at file level.
    from pylint.reporters import text

    buff = StringIO()
    args = ["--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}",
            "-E",
            "ceilometer"]
    lint.Run(args, reporter=text.TextReporter(output=buff), exit=False)
    val = buff.getvalue()
    buff.close()
    return val
def generate_error_keys(msg=None):
    """Regenerate the known-exceptions file from pylint output.

    If msg is None a fresh pylint run supplies the output.
    """
    print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
    pylint_output = run_pylint() if msg is None else msg
    parsed = LintOutput.from_msg_to_dict(pylint_output)
    with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as exceptions_file:
        ErrorKeys.print_json(parsed, output=exceptions_file)
def validate(newmsg=None):
    """Compare current pylint output against the known-exceptions file.

    Prints every error not present in the exceptions file and exits with
    status 1 if any are found; also reports exceptions that no longer
    occur.  If newmsg is None, pylint is run to produce the output.
    """
    print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
    known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
    if newmsg is None:
        print("Running pylint. Be patient...")
        newmsg = run_pylint()
    errors = LintOutput.from_msg_to_dict(newmsg)
    print("Unique errors reported by pylint: was %d, now %d."
          % (len(known), len(errors)))
    passed = True
    # Any error key not recorded in the exceptions file is a failure.
    for err_key, err_list in errors.items():
        for err in err_list:
            if err_key not in known:
                print(err.lintoutput)
                print()
                passed = False
    if passed:
        print("Congrats! pylint check passed.")
        # Report stale entries so the exceptions file can be trimmed.
        redundant = known - set(errors.keys())
        if redundant:
            print("Extra credit: some known pylint exceptions disappeared.")
            for i in sorted(redundant):
                print(json.dumps(i))
            print("Consider regenerating the exception file if you will.")
    else:
        print("Please fix the errors above. If you believe they are false"
              " positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)
def usage():
    """Print command-line usage for this tool to stdout."""
    help_text = """Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
"""
    print(help_text)
def main():
    """Dispatch on the first CLI argument; default action is validate."""
    option = sys.argv[1] if len(sys.argv) > 1 else "validate"
    actions = {"generate": generate_error_keys, "validate": validate}
    # Unknown options fall through to the usage message.
    actions.get(option, usage)()
if __name__ == "__main__":
main()
ceilometer-6.0.0/tools/__init__.py 0000664 0005670 0005671 00000000000 12701406223 020242 0 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/tools/make_test_data.py 0000775 0005670 0005671 00000016471 12701406223 021476 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating test data for Ceilometer.
Usage:
Generate testing data for e.g. for default time span
source .tox/py27/bin/activate
./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util
--volume 20
"""
import argparse
import datetime
import logging
import random
import sys
import uuid
from oslo_config import cfg
from oslo_utils import timeutils
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage
def make_test_data(name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial'):
    """Yield signed metering messages covering [start, end].

    One sample is produced every ``interval`` minutes.  ``start`` and
    ``end`` may be datetime objects or strings parseable by
    timeutils.parse_strtime.  When random_min/random_max are both >= 0 a
    random amount is added to the volume of each sample (cumulative
    meters accumulate it; gauge/delta meters reset each step).
    """
    resource_metadata = resource_metadata or {'display_name': 'toto',
                                              'host': 'tata',
                                              'image_ref': 'test',
                                              'instance_flavor_id': 'toto',
                                              'server_group': 'toto',
                                              }
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)
    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)
    increment = datetime.timedelta(minutes=interval)
    print('Adding new samples for meter %s.' % (name))
    # Generate samples
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)
        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp.isoformat(),
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        # Sign the sample with the configured telemetry secret.
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.telemetry_secret)
        # timestamp should be string when calculating signature, but should be
        # datetime object when calling record_metering_data.
        data['timestamp'] = timestamp
        yield data
        n += 1
        timestamp = timestamp + increment
        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume
    print('Added %d new samples for meter %s.' % (n, name))
def record_test_data(conn, *args, **kwargs):
    """Generate samples via make_test_data() and store each on conn."""
    samples = make_test_data(*args, **kwargs)
    for item in samples:
        conn.record_metering_data(item)
def get_parser():
    """Build the argument parser for the metering test-data generator."""
    cli = argparse.ArgumentParser(description='generate metering data')
    cli.add_argument(
        '--interval', default=10, type=int,
        help='The period between samples, in minutes.')
    cli.add_argument(
        '--start', default=31,
        help='Number of days to be stepped back from now or date in the past ('
             '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.')
    cli.add_argument(
        '--end', default=2,
        help='Number of days to be stepped forward from now or date in the '
             'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end '
             'range.')
    cli.add_argument(
        '--type', choices=('gauge', 'cumulative'), default='gauge',
        dest='meter_type',
        help='Counter type.')
    cli.add_argument(
        '--unit', default=None,
        help='Counter unit.')
    cli.add_argument(
        '--project', dest='project_id',
        help='Project id of owner.')
    cli.add_argument(
        '--user', dest='user_id',
        help='User id of owner.')
    cli.add_argument(
        '--random_min', type=int, default=0,
        help='The random min border of amount for added to given volume.')
    cli.add_argument(
        '--random_max', type=int, default=0,
        help='The random max border of amount for added to given volume.')
    cli.add_argument(
        '--resource', dest='resource_id',
        # A fresh id per invocation unless the caller supplies one.
        default=str(uuid.uuid4()),
        help='The resource id for the meter data.')
    cli.add_argument(
        '--counter', default='instance', dest='name',
        help='The counter name for the meter data.')
    cli.add_argument(
        '--volume', type=int, default=1,
        help='The amount to attach to the meter.')
    return cli
def main():
    """Entry point: parse args, connect to storage, record test samples.

    --start/--end accept either an integer day offset from now or an
    absolute "YYYY-MM-DDTHH:MM:SS" timestamp; both are normalized to
    datetime objects before being forwarded to record_test_data().
    """
    cfg.CONF([], project='ceilometer')
    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the metering database
    conn = storage.get_connection_from_config(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span: try the integer-offset form first,
    # then fall back to parsing an absolute timestamp.
    format = '%Y-%m-%dT%H:%M:%S'
    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        try:
            start = datetime.datetime.strptime(args.start, format)
        except ValueError:
            raise
    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        try:
            end = datetime.datetime.strptime(args.end, format)
        except ValueError:
            raise
    args.start = start
    args.end = end
    # Forward every parsed option as keyword arguments.
    record_test_data(conn=conn, **args.__dict__)
    return 0


if __name__ == '__main__':
    main()
ceilometer-6.0.0/tools/make_test_data.sh 0000775 0005670 0005671 00000002737 12701406223 021460 0 ustar jenkins jenkins 0000000 0000000 #!/bin/bash
# Directory holding the companion make_test_data.py script.
bindir=$(dirname "$0")

project_name="$1"
if [ -z "$project_name" ]
then
    project_name=demo
fi

# Default to the demo user unless the environment provides one.
if [ -z "$OS_USERNAME" ]
then
    user=demo
else
    user=$OS_USERNAME
fi

# Convert a possible project name to an id, if we have
# keystone installed.  ('command -v' replaces the non-portable 'which'.)
if command -v keystone >/dev/null
then
    project=$(keystone tenant-list | grep " $project_name " | cut -f2 -d'|' | cut -f2 -d' ')
else
    # Assume they gave us the project id as argument.
    project="$project_name"
fi

if [ -z "$project" ]
then
    echo "Could not determine project id for \"$project_name\"" 1>&2
    exit 1
fi

# Fixed timestamps spanning several days, used pairwise below.
early1="2012-08-27T07:00:00"
early2="2012-08-27T17:00:00"
start="2012-08-28T00:00:00"
middle1="2012-08-28T08:00:00"
middle2="2012-08-28T18:00:00"
middle3="2012-08-29T09:00:00"
middle4="2012-08-29T19:00:00"
end="2012-08-31T23:59:00"
late1="2012-08-31T10:00:00"
late2="2012-08-31T20:00:00"

# mkdata <resource-id> <start> <end>: insert one sample series.
mkdata() {
    "${bindir}/make_test_data.py" --project "$project" \
        --user "$user" --start "$2" --end "$3" \
        --resource "$1" --counter instance --volume 1
}

dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2)

echo "$project"

# Generate one resource per ordered pair of timestamps.
# ${!name} (bash indirect expansion) replaces the original eval-based
# indirection; 'mkdata ... || exit' aborts with mkdata's own status,
# where the original '[ $? -eq 0 ] || exit $?' exited with the status
# of the test command instead.
ndates=${#dates[@]}
for ((i = 0; i <= ndates - 2; i++))
do
    iname=${dates[$i]}
    ivalue=${!iname}
    for ((j = i + 1; j <= ndates - 1; j++))
    do
        jname=${dates[$j]}
        jvalue=${!jname}
        resource_id="${project_name}-$iname-$jname"
        echo "$resource_id"
        mkdata "$resource_id" "$ivalue" "$jvalue" || exit
    done
    echo
done
ceilometer-6.0.0/tools/ceilometer-test-event.py 0000775 0005670 0005671 00000004630 12701406223 022747 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool help you debug your event definitions.
Feed it a list of test notifications in json format, and it will show
you what events will be generated.
"""
import json
import sys
from oslo_config import cfg
from stevedore import extension
from ceilometer.event import converter
from ceilometer import service
# Register -i/-o CLI options; notifications default to stdin, results to
# stdout.
cfg.CONF.register_cli_opts([
    cfg.StrOpt('input-file',
               short='i',
               help='File to read test notifications from.'
               ' (Containing a json list of notifications.)'
               ' defaults to stdin.'),
    cfg.StrOpt('output-file',
               short='o',
               help='File to write results to. Defaults to stdout.'),
])

# Map trait dtype codes to human-readable type names for the report.
TYPES = {1: 'text',
         2: 'int',
         3: 'float',
         4: 'datetime'}

# Parses the command line and loads the ceilometer configuration.
service.prepare_service()

output_file = cfg.CONF.output_file
input_file = cfg.CONF.input_file

if output_file is None:
    out = sys.stdout
else:
    out = open(output_file, 'w')

if input_file is None:
    notifications = json.load(sys.stdin)
else:
    with open(input_file, 'r') as f:
        notifications = json.load(f)

out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file)
out.write("Notifications tested: %s\n" % len(notifications))

# Build the notification-to-event converter with all registered trait
# plugins.
event_converter = converter.setup_events(
    extension.ExtensionManager(
        namespace='ceilometer.event.trait_plugin'))

# Convert each notification and report the resulting event and traits;
# notifications the definitions drop are reported by message_id.
for notification in notifications:
    event = event_converter.to_event(notification)
    if event is None:
        out.write("Dropped notification: %s\n" %
                  notification['message_id'])
        continue
    out.write("Event: %s at %s\n" % (event.event_type, event.generated))
    for trait in event.traits:
        dtype = TYPES[trait.dtype]
        out.write(" Trait: name: %s, type: %s, value: %s\n" % (
            trait.name, dtype, trait.value))
ceilometer-6.0.0/tools/make_test_event_data.py 0000775 0005670 0005671 00000007332 12701406223 022673 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating event test data for Ceilometer.
Usage:
Generate testing data for e.g. for default time span
source .tox/py27/bin/activate
./tools/make_test_event_data.py --event_types 3
"""
import argparse
import datetime
import logging
import random
import sys
import uuid
from oslo_config import cfg
from oslo_utils import timeutils
from ceilometer.event.storage import models
from ceilometer import storage
def make_test_data(conn, start, end, interval, event_types):
    """Record synthetic events on conn covering [start, end].

    Every ``interval`` minutes one event per event type is generated,
    each carrying one trait of every dtype (text, int, float, datetime).
    ``start``/``end`` may be datetimes or strings parseable by
    timeutils.parse_strtime.
    """
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)
    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)
    increment = datetime.timedelta(minutes=interval)
    print('Adding new events')
    n = 0
    while timestamp <= end:
        data = []
        for i in range(event_types):
            # Trait dtypes: 1=text, 2=int, 3=float, 4=datetime.
            traits = [models.Trait('id1_%d' % i, 1, str(uuid.uuid4())),
                      models.Trait('id2_%d' % i, 2, random.randint(1, 10)),
                      models.Trait('id3_%d' % i, 3, random.random()),
                      models.Trait('id4_%d' % i, 4, timestamp)]
            data.append(models.Event(str(uuid.uuid4()),
                                     'event_type%d' % i,
                                     timestamp,
                                     traits,
                                     {}))
            n += 1
        # One batched write per timestamp step.
        conn.record_events(data)
        timestamp = timestamp + increment
    print('Added %d new events' % n)
def main():
    """Parse CLI options, configure logging, and generate test events."""
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(
        description='generate event data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        type=int,
        help='The number of days in the past to start timestamps.',
    )
    parser.add_argument(
        '--end',
        default=2,
        type=int,
        help='The number of days into the future to continue timestamps.',
    )
    parser.add_argument(
        '--event_types',
        default=3,
        type=int,
        help='The number of unique event_types.',
    )
    opts = parser.parse_args()

    # Set up logging to use the console: everything at DEBUG and above
    # goes to stderr with a bare message format.
    console_handler = logging.StreamHandler(sys.stderr)
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(logging.Formatter('%(message)s'))
    root = logging.getLogger('')
    root.addHandler(console_handler)
    root.setLevel(logging.DEBUG)

    # Connect to the event database
    conn = storage.get_connection_from_config(cfg.CONF, 'event')

    # Compute the correct time span from the day offsets.
    now = datetime.datetime.utcnow()
    make_test_data(conn=conn,
                   start=now - datetime.timedelta(days=opts.start),
                   end=now + datetime.timedelta(days=opts.end),
                   interval=opts.interval,
                   event_types=opts.event_types)
# Run as a script; main() reads options from sys.argv via argparse.
if __name__ == '__main__':
    main()
ceilometer-6.0.0/tools/show_data.py 0000775 0005670 0005671 00000007101 12701406223 020470 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Copyright 2012 New Dream Network (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
import six
from ceilometer import storage
def show_users(db, args):
    """Print every known user ID, one per line, in sorted order.

    :param db: storage connection exposing ``get_users()``
    :param args: unused positional command arguments
    """
    for user in sorted(db.get_users()):
        print(user)
def show_resources(db, args):
    """Print each user's resources with their metadata and meter statistics.

    :param db: storage connection
    :param args: optional list of user IDs; when empty, all users are shown
    """
    # Explicit user list on the command line wins; otherwise show everyone.
    users = args if args else sorted(db.get_users())
    for user in users:
        print(user)
        for resource in db.get_resources(user=user):
            print(' %(resource_id)s %(timestamp)s' % resource)
            for key, value in sorted(resource['metadata'].items()):
                print(' %-10s : %s' % (key, value))
            for meter in resource['meter']:
                sample_filter = storage.SampleFilter(
                    user=user,
                    meter=meter['counter_name'],
                    resource=resource['resource_id'],
                )
                totals = db.get_statistics(sample_filter)
                # FIXME(dhellmann): Need a way to tell whether to use
                # max() or sum() by meter name without hard-coding.
                if meter['counter_name'] in ['cpu', 'disk']:
                    value = totals[0]['max']
                else:
                    value = totals[0]['sum']
                print(' %s (%s): %s' %
                      (meter['counter_name'], meter['counter_type'], value))
def show_total_resources(db, args):
    """Print per-user aggregate totals for the disk, cpu and instance meters.

    :param db: storage connection
    :param args: optional list of user IDs; when empty, all users are shown
    """
    if args:
        users = args
    else:
        users = sorted(db.get_users())
    for u in users:
        print(u)
        for meter in ['disk', 'cpu', 'instance']:
            stats = db.get_statistics(storage.SampleFilter(
                user=u,
                meter=meter,
            ))
            # NOTE: get_statistics() returns a sequence of statistics
            # objects (show_resources indexes the same call's result as
            # totals[0]); indexing the sequence directly by key was a bug.
            if meter in ['cpu', 'disk']:
                total = stats[0]['max']
            else:
                total = stats[0]['sum']
            print(' ', meter, total)
def show_raw(db, args):
    """Dump every sample for every user, grouped by resource.

    :param db: storage connection
    :param args: unused positional command arguments
    """
    line_fmt = ' %(timestamp)s %(counter_name)10s %(counter_volume)s'
    for user in sorted(db.get_users()):
        print(user)
        for resource in db.get_resources(user=user):
            print(' ', resource['resource_id'])
            sample_filter = storage.SampleFilter(
                user=user,
                resource=resource['resource_id'],
            )
            for sample in db.get_samples(sample_filter):
                print(line_fmt % sample)
def show_help(db, args):
    """List the names of all available commands.

    :param db: storage connection (unused)
    :param args: unused positional command arguments
    """
    print('COMMANDS:')
    for command_name in sorted(COMMANDS):
        print(command_name)
def show_projects(db, args):
    """Print every known project ID, one per line, in sorted order.

    :param db: storage connection exposing ``get_projects()``
    :param args: unused positional command arguments
    """
    for project in sorted(db.get_projects()):
        print(project)
# Dispatch table: maps the command name given as the first CLI argument
# to the handler function that implements it (see main()).
COMMANDS = {
    'users': show_users,
    'projects': show_projects,
    'help': show_help,
    'resources': show_resources,
    'total_resources': show_total_resources,
    'raw': show_raw,
}
def main(argv):
    """Load configuration, connect to storage and dispatch a command.

    Any CLI arguments not consumed by oslo.config are treated as the
    command name followed by its positional arguments.
    """
    extra_args = cfg.CONF(
        sys.argv[1:],
        # NOTE(dhellmann): Read the configuration file(s) for the
        # ceilometer collector by default.
        default_config_files=['/etc/ceilometer/ceilometer.conf'],
    )
    db = storage.get_connection_from_config(cfg.CONF)
    if extra_args:
        command, command_args = extra_args[0], extra_args[1:]
    else:
        command, command_args = 'help', []
    COMMANDS[command](db, command_args)
# Run as a script; the full argv (minus program name) goes through
# oslo.config inside main().
if __name__ == '__main__':
    main(sys.argv)
ceilometer-6.0.0/tools/lintstack.sh 0000775 0005670 0005671 00000004144 12701406223 020501 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env bash
# Copyright (c) 2012-2013, AT&T Labs, Yun Mao
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Use lintstack.py to compare pylint errors.
# We run pylint twice, once on HEAD, once on the code before the latest
# commit for review.
set -e

# Quote the command substitutions so paths with spaces survive.
TOOLS_DIR=$(cd "$(dirname "$0")" && pwd)

# Get the current branch name.
GITHEAD=$(git rev-parse --abbrev-ref HEAD)
if [[ "$GITHEAD" == "HEAD" ]]; then
    # In detached head mode, get revision number instead
    GITHEAD=$(git rev-parse HEAD)
    echo "Currently we are at commit $GITHEAD"
else
    echo "Currently we are at branch $GITHEAD"
fi

# Keep a copy of lintstack.py so the same linter version is used for
# both revisions even after checking out HEAD~1.
cp -f "$TOOLS_DIR/lintstack.py" "$TOOLS_DIR/lintstack.head.py"

if git rev-parse HEAD^2 2>/dev/null; then
    # The HEAD is a Merge commit. Here, the patch to review is
    # HEAD^2, the master branch is at HEAD^1, and the patch was
    # written based on HEAD^2~1.
    PREV_COMMIT=$(git rev-parse HEAD^2~1)
    git checkout HEAD~1
    # The git merge is necessary for reviews with a series of patches.
    # If not, this is a no-op so won't hurt either.
    git merge "$PREV_COMMIT"
else
    # The HEAD is not a merge commit. This won't happen on gerrit.
    # Most likely you are running against your own patch locally.
    # We assume the patch to examine is HEAD, and we compare it against
    # HEAD~1
    git checkout HEAD~1
fi

# First generate tools/pylint_exceptions from HEAD~1
"$TOOLS_DIR/lintstack.head.py" generate

# Then use that as a reference to compare against HEAD
git checkout "$GITHEAD"
"$TOOLS_DIR/lintstack.head.py"

echo "Check passed. FYI: the pylint exceptions are:"
cat "$TOOLS_DIR/pylint_exceptions"
ceilometer-6.0.0/tools/send_test_data.py 0000775 0005670 0005671 00000011016 12701406223 021500 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for sending test data for Ceilometer via oslo.messaging.
Usage:
Send messages with samples generated by make_test_data
source .tox/py27/bin/activate
./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering
"""
import argparse
import datetime
import functools
import json
import random
import uuid
import make_test_data
from oslo_config import cfg
import oslo_messaging
from six import moves
from ceilometer import messaging
from ceilometer.publisher import utils
from ceilometer import service
def send_batch_notifier(notifier, topic, batch):
    """Publish one batch of samples as a notification.

    :param notifier: oslo.messaging Notifier (or compatible object)
    :param topic: used as the notification's event_type
    :param batch: list of sample dicts sent as the payload
    """
    notifier.sample({}, event_type=topic, payload=batch)
def get_notifier(config_file):
    # Initialize the ceilometer service with the given config file, then
    # build an oslo.messaging Notifier over the configured transport.
    service.prepare_service(argv=['/', '--config-file', config_file])
    return oslo_messaging.Notifier(
        messaging.get_transport(),
        driver='messagingv2',
        publisher_id='telemetry.publisher.test',
        # NOTE(review): the messaging topic is hardcoded to 'metering'
        # even though callers pass a separate --topic used as the
        # notification event_type — confirm this is intentional.
        topic='metering',
    )
def generate_data(send_batch, make_data_args, samples_count,
                  batch_size, resources_count, topic):
    """Generate samples, spread them over random resources, and send them.

    :param send_batch: callable(topic, batch) that publishes a batch
    :param make_data_args: namespace of options for make_test_data
    :param samples_count: maximum number of samples to emit
    :param batch_size: number of samples per published batch
    :param resources_count: number of distinct resource IDs to use
    :param topic: topic passed through to send_batch
    :returns: dict mapping resource ID -> number of samples assigned to it
    """
    # One sample per minute ending now, so samples_count samples fit the span.
    make_data_args.interval = 1
    make_data_args.start = (datetime.datetime.utcnow() -
                            datetime.timedelta(minutes=samples_count))
    make_data_args.end = datetime.datetime.utcnow()
    make_data_args.resource_id = None
    resources_list = [str(uuid.uuid4())
                      for _ in moves.xrange(resources_count)]
    resource_samples = {resource: 0 for resource in resources_list}
    batch = []
    count = 0
    for sample in make_test_data.make_test_data(**make_data_args.__dict__):
        count += 1
        resource = resources_list[random.randint(0, len(resources_list) - 1)]
        resource_samples[resource] += 1
        sample['resource_id'] = resource
        # need to change the timestamp from datetime.datetime type to iso
        # format (unicode type), because collector will change iso format
        # timestamp to datetime.datetime type before recording to db.
        sample['timestamp'] = sample['timestamp'].isoformat()
        # need to recalculate signature because of the resource_id change
        sig = utils.compute_signature(sample,
                                      cfg.CONF.publisher.telemetry_secret)
        sample['message_signature'] = sig
        batch.append(sample)
        if len(batch) == batch_size:
            send_batch(topic, batch)
            batch = []
        if count == samples_count:
            # BUGFIX: previously an empty batch was published whenever the
            # sample limit coincided with a just-flushed batch.
            if batch:
                send_batch(topic, batch)
            return resource_samples
    # Flush whatever remains if the generator ran out early.
    if batch:
        send_batch(topic, batch)
    return resource_samples
def get_parser():
    """Build the argument parser for the test-data sender.

    :returns: configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', dest='batch_size', type=int,
                        default=100)
    parser.add_argument('--config-file',
                        default='/etc/ceilometer/ceilometer.conf')
    parser.add_argument('--topic', default='perfmetering')
    parser.add_argument('--samples-count', dest='samples_count', type=int,
                        default=1000)
    parser.add_argument('--resources-count', dest='resources_count', type=int,
                        default=100)
    parser.add_argument('--result-directory', dest='result_dir',
                        default='/tmp')
    return parser
def main():
    """Send generated test samples and record the per-resource counts.

    Writes a JSON mapping of resource ID -> sample count to a file under
    the chosen result directory and returns that file's path.
    """
    args = get_parser().parse_known_args()[0]
    make_data_args = make_test_data.get_parser().parse_known_args()[0]
    notifier = get_notifier(args.config_file)
    send_batch = functools.partial(send_batch_notifier, notifier)
    result_dir = args.result_dir
    # Strip the options generate_data() does not accept before expanding
    # the namespace with **.  BUGFIX: the code previously also deleted a
    # non-existent 'notify' attribute (get_parser() defines no --notify
    # option), which raised AttributeError at runtime.
    del args.config_file
    del args.result_dir
    resource_writes = generate_data(send_batch, make_data_args,
                                    **args.__dict__)
    result_file = "%s/sample-by-resource-%s" % (result_dir,
                                                random.getrandbits(32))
    with open(result_file, 'w') as f:
        f.write(json.dumps(resource_writes))
    return result_file
# Run as a script; main() parses its own options via get_parser().
if __name__ == '__main__':
    main()
ceilometer-6.0.0/tools/test_hbase_table_utils.py 0000775 0005670 0005671 00000002547 12701406223 023240 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo_config import cfg
from ceilometer import storage
def main(argv):
    """Upgrade or clear the HBase test tables named by the environment.

    Only acts when CEILOMETER_TEST_STORAGE_URL points at an hbase://
    backend; each "--upgrade" / "--clear" argument is applied to both the
    metering and the event storage connections.
    """
    cfg.CONF([], project='ceilometer')
    storage_url = os.getenv("CEILOMETER_TEST_STORAGE_URL", "")
    if storage_url.startswith("hbase://"):
        prefix = os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test")
        url = "%s?table_prefix=%s" % (storage_url, prefix)
        conn = storage.get_connection(url, 'ceilometer.metering.storage')
        event_conn = storage.get_connection(url, 'ceilometer.event.storage')
        for arg in argv:
            if arg == "--upgrade":
                conn.upgrade()
                event_conn.upgrade()
            elif arg == "--clear":
                conn.clear()
                event_conn.clear()
# Run as a script; pass only the real arguments (skip the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
ceilometer-6.0.0/run-functional-tests.sh 0000775 0005670 0005671 00000000355 12701406223 021451 0 ustar jenkins jenkins 0000000 0000000 #!/bin/bash -x
set -e

# Use a mongodb backend by default.  Quote the expansion so an unset or
# multi-word value does not break the test.
if [ -z "$CEILOMETER_TEST_BACKEND" ]; then
  CEILOMETER_TEST_BACKEND="mongodb"
fi

# $CEILOMETER_TEST_BACKEND is intentionally unquoted here: it may hold a
# space-separated list of backends to iterate over.
for backend in $CEILOMETER_TEST_BACKEND; do
  # "$@" (not $*) preserves each script argument as a separate word.
  overtest "$backend" ./tools/pretty_tox.sh "$@"
done
ceilometer-6.0.0/doc/ 0000775 0005670 0005671 00000000000 12701406364 015556 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/doc/source/ 0000775 0005670 0005671 00000000000 12701406364 017056 5 ustar jenkins jenkins 0000000 0000000 ceilometer-6.0.0/doc/source/2-1-collection-notification.png 0000664 0005670 0005671 00000100776 12701406223 024705 0 ustar jenkins jenkins 0000000 0000000 ‰PNG
IHDR w ° âôVp bKGD ˙ ˙ ˙ ˝§“ pHYs šś tIMEß ;.ˇ IDATxÚěÝpTužď˙—÷úÝ˝b2_@‡8!YýN"VI΀5$^·ô’€SŽb9°&(+¬dw…pFćňÓ
Ě qä‚«w&żF«$ S€I ľ0żbC4®- agjoő÷Źđ99Ý}Nw'äWĂóQe•tNwźţôůôçsŢç}ŢźŰü~ż_ €ňźh =w Ü €Dp bÁ] Aw ÝNDÇçó©©©‰† 0 ˛˛˛Âţťŕn5a Ŕ€ILLÔW_}ĺúwĘ2Dˇ˘˘‚F 0 |>źęęę\˙NćnÝ™4ZwÝ=š† Đ/ÎýÉ««íť·#¸ŰCÓźČŇł‹ž¤! ô‹źäŻŃéăg"nGY Aw Ü €Dp bÁ] Aw ÝN ’ÓÇĎčÜŮóşg\ŠâFÄéŢq©4
0ČČÜ‹~°BŹŹ›s×DÜv͢
z|ÜlíŮú^źĽ÷ĹĎ/©˘ěýÇ7[ŹŹ›‹ź_˛ë¸rU‹~°B?É_Ł7_/ÓOň×hí˘Ť®Ű”Ž+Wµg[h{lzĺ—}ÚV}i~Îb=>n¶N;C @ź ¸;Ŕ:®\Őął^IŇ©cg"ű>ýÓyIŇ=÷ĄÜđ{W”˝Żů9‹uµ˝3ŕńO®ďĎđř8Ýu÷hëńŁţ¨sg˝§™ůŹifţcšţD–ëöᓳ^-ţÁ
=ň·sž®¶zŕÁű‡Üwn‚ŕ}ń= eÜąëÁÚáńqşÚŢ©ÚßÔkĽK0ňâç—ş‚}P
ˇr÷’BźwÝ=ZëĘVix|\ŔăGtPsžČŇóŻĚµď¸rŐqűđŃ?ęâç—”3++äoVäwµŐ šďüΤŃqÇp: úÁÝvúz¦î”G&ëÔ±3Ş¨×ł‹žtĚ€=ׇ˛öěŃŕ`ň;†;MÖđ”G&Gµý@0™ÎNŮąă‡XĆnđw>T÷ ±‰ŕî űäO^I]ٲSrľŁĘňTQţ~@f¬a2>ÝĘ|rÖ«Ź~וÉ:"~¸î—˘ďý×ď„d‡šŃĚűž>~&`a´ÓÇ»‚Ź—ž˘w·ţm‚Á·ÝÖµMđßÍżí:®\ŐŠCęhżŞ«W:5üŽ8ÍśókĆęĹĎ/©¶˘^WŻt•Š~Gś|H ô“ł^uvtęÓëĄ:Ű;öéâç—ôeë%Ý™4:$ŢqĺŞŐNçţt^÷Ü—âřÁďe^űÔ±3:}üŚ.~~I÷Ţ—ŞGfMëQ®ůÎďą/ĹjźOţäŐ]wŹVά,ÇŔ˝ů